From efa4988d72c69d3024ee25ad1ae87c83b9f8267e Mon Sep 17 00:00:00 2001
From: Nicholas Bellinger
Date: Tue, 19 Jul 2011 08:09:01 +0000
Subject: target: Remove unnecessary *cdb transport_get_lun_for_cmd parameter

This patch removes the now unnecessary 'unsigned char *cdb' function
parameter from transport_get_lun_for_cmd().  This also includes updating
lio-target, tcm_loop and tcm_fc usage of transport_get_lun_for_cmd().

Reported-by: Fubo Chen
Signed-off-by: Nicholas A. Bellinger
---
 include/target/target_core_device.h |    2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
index 52b18a5752c9..d9745bfa4429 100644
--- a/include/target/target_core_device.h
+++ b/include/target/target_core_device.h
@@ -1,7 +1,7 @@
 #ifndef TARGET_CORE_DEVICE_H
 #define TARGET_CORE_DEVICE_H
 
-extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
+extern int transport_get_lun_for_cmd(struct se_cmd *, u32);
 extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
 extern struct se_dev_entry *core_get_se_deve_from_rtpi(
 	struct se_node_acl *, u16);
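To see what the prototype change means for callers, here is a minimal,
hypothetical sketch of a fabric call site before and after this patch.
The my_fabric_map_lun() wrapper and its error handling are illustrative
stand-ins, not code from lio-target, tcm_loop, or tcm_fc; only
transport_get_lun_for_cmd() and struct se_cmd come from the tree.

/*
 * Hypothetical fabric call site. The CDB pointer is no longer part of
 * the LUN lookup, so only the command and the unpacked LUN are passed.
 */
static int my_fabric_map_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	int ret;

	/* Old call: transport_get_lun_for_cmd(se_cmd, cdb, unpacked_lun); */
	ret = transport_get_lun_for_cmd(se_cmd, unpacked_lun);
	if (ret < 0)
		return ret;	/* fabric-specific failure handling goes here */

	return 0;
}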
From e3d6f909ed803d92a5ac9b4a2c087e0eae9b90d0 Mon Sep 17 00:00:00 2001
From: Andy Grover
Date: Tue, 19 Jul 2011 08:55:10 +0000
Subject: target: Core cleanups from AGrover (round 1)

This patch contains the squashed version of a number of cleanups and
minor fixes from Andy's initial series (round 1) for target core this
past spring.  The condensed log looks like:

target: use errno values instead of returning -1 for everything
target: Rename transport_calc_sg_num to transport_init_task_sg
target: Fix leak in error path in transport_init_task_sg
target/pscsi: Remove pscsi_get_sh() usage
target: Make two runtime checks into WARN_ONs
target: Remove hba queue depth and convert to spin_lock_irq usage
target: dev->dev_status_queue_obj is unused
target: Make struct se_queue_req.cmd type struct se_cmd *
target: Remove __transport_get_qr_from_queue()
target: Rename se_dev->g_se_dev_list to se_dev_node
target: Remove struct se_global
target: Simplify scsi mib index table code
target: Make dev_queue_obj a member of se_device instead of a pointer
target: remove extraneous returns at end of void functions
target: Ensure transport_dump_vpd_ident_type returns null-terminated str
target: Function pointers don't need to use '&' to be assigned
target: Fix comment in __transport_execute_tasks()
target: Misc style cleanups
target: rename struct pr_reservation_template to pr_reservation
target: Remove #defines that just perform indirection
target: Inline transport_get_task_from_execute_queue()
target: Minor header comment fixes

Signed-off-by: Andy Grover
Signed-off-by: Nicholas Bellinger
---
 drivers/target/loopback/tcm_loop.c           |    8 +-
 drivers/target/target_core_alua.c            |  222 ++--
 drivers/target/target_core_cdb.c             |  146 +--
 drivers/target/target_core_configfs.c        |  162 +--
 drivers/target/target_core_device.c          |  440 ++++----
 drivers/target/target_core_fabric_configfs.c |   12 +-
 drivers/target/target_core_file.c            |   45 +-
 drivers/target/target_core_file.h            |    2 -
 drivers/target/target_core_hba.c             |   25 +-
 drivers/target/target_core_iblock.c          |   51 +-
 drivers/target/target_core_iblock.h          |    1 -
 drivers/target/target_core_pr.c              |  364 +++---
 drivers/target/target_core_pr.h              |    2 +-
 drivers/target/target_core_pscsi.c           |   81 +-
 drivers/target/target_core_pscsi.h           |    1 -
 drivers/target/target_core_rd.c              |   66 +-
 drivers/target/target_core_rd.h              |    2 -
 drivers/target/target_core_stat.c            |  112 +-
 drivers/target/target_core_tmr.c             |   84 +-
 drivers/target/target_core_tpg.c             |  139 +--
 drivers/target/target_core_transport.c       | 1530 ++++++++++++--------------
 drivers/target/target_core_ua.c              |   32 +-
 drivers/target/tcm_fc/tfc_cmd.c              |    7 +-
 drivers/target/tcm_fc/tfc_conf.c             |    4 +-
 drivers/target/tcm_fc/tfc_io.c               |    4 +-
 include/target/target_core_base.h            |   89 +-
 include/target/target_core_transport.h       |   12 +-
 27 files changed, 1703 insertions(+), 1940 deletions(-)

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 2d0f22a91f67..2f19e1926493 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		T_TASK(se_cmd)->t_tasks_bidi = 1;
+		se_cmd->t_task->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -176,7 +176,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * For BIDI commands, pass in the extra READ buffer
 	 * to transport_generic_map_mem_to_cmd() below..
 	 */
-	if (T_TASK(se_cmd)->t_tasks_bidi) {
+	if (se_cmd->t_task->t_tasks_bidi) {
 		struct scsi_data_buffer *sdb = scsi_in(sc);
 
 		mem_bidi_ptr = (void *)sdb->table.sgl;
@@ -1402,9 +1402,9 @@ static int tcm_loop_register_configfs(void)
 	 * Register the top level struct config_item_type with TCM core
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
-	if (!fabric) {
+	if (IS_ERR(fabric)) {
 		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
-		return -1;
+		return PTR_ERR(fabric);
 	}
 	/*
 	 * Setup the fabric API of function pointers used by target_core_mod
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47abb42d9c36..bfc42adea510 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -46,6 +46,14 @@ static int core_alua_set_tg_pt_secondary_state(
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 	struct se_port *port, int explict, int offline);
 
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
 /*
  * REPORT_TARGET_PORT_GROUPS
  *
@@ -53,16 +61,16 @@ static int core_alua_set_tg_pt_secondary_state(
  */
 int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 {
-	struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+	struct se_subsystem_dev *su_dev = cmd->se_lun->lun_se_dev->se_sub_dev;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
-	unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+	unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf;
 	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
 				    Target port group descriptor */
 
-	spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
-	list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		/*
 		 * PREF: Preferred target port bit, determine if this
@@ -124,7 +132,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
 		}
 		spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 	}
-	spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+	spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 	/*
 	 * Set the RETURN DATA LENGTH set in the header of the DataIN Payload
 	 */
@@ -143,13 +151,13 @@ int
core_emulate_report_target_port_groups(struct se_cmd *cmd) */ int core_emulate_set_target_port_groups(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); - struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev; - struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep; - struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl; + struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_subsystem_dev *su_dev = dev->se_sub_dev; + struct se_port *port, *l_port = cmd->se_lun->lun_sep; + struct se_node_acl *nacl = cmd->se_sess->se_node_acl; struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */ u32 len = 4; /* Skip over RESERVED area in header */ int alua_access_state, primary = 0, rc; @@ -224,9 +232,9 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) * Locate the matching target port group ID from * the global tg_pt_gp list */ - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, - &T10_ALUA(su_dev)->tg_pt_gps_list, + &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (!(tg_pt_gp->tg_pt_gp_valid_id)) continue; @@ -236,18 +244,18 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic_inc(); - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); rc = core_alua_do_port_transition(tg_pt_gp, dev, l_port, nacl, alua_access_state, 1); - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic_dec(); break; } - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); /* * If not matching target port group ID can be located * throw an exception with ASCQ: INVALID_PARAMETER_LIST @@ -464,7 +472,7 @@ static int core_alua_state_check( unsigned char *cdb, u8 *alua_ascq) { - struct se_lun *lun = SE_LUN(cmd); + struct se_lun *lun = cmd->se_lun; struct se_port *port = lun->lun_sep; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; @@ -522,7 +530,7 @@ static int core_alua_state_check( default: printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", out_alua_state); - return -1; + return -EINVAL; } return 0; @@ -553,7 +561,7 @@ static int core_alua_check_transition(int state, int *primary) break; default: printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state); - return -1; + return -EINVAL; } return 0; @@ -866,9 +874,9 @@ int core_alua_do_port_transition( smp_mb__after_atomic_inc(); spin_unlock(&lu_gp->lu_gp_lock); - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, - &T10_ALUA(su_dev)->tg_pt_gps_list, + &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (!(tg_pt_gp->tg_pt_gp_valid_id)) @@ -893,7 +901,7 @@ int core_alua_do_port_transition( } atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic_inc(); - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); /* * core_alua_do_transition_tg_pt() will always return * success. 
@@ -901,11 +909,11 @@ int core_alua_do_port_transition( core_alua_do_transition_tg_pt(tg_pt_gp, port, nacl, md_buf, new_state, explict); - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); smp_mb__after_atomic_dec(); } - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); spin_lock(&lu_gp->lu_gp_lock); atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); @@ -942,11 +950,11 @@ static int core_alua_update_tpg_secondary_metadata( memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN); len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s", - TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg)); + se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg)); - if (TPG_TFO(se_tpg)->tpg_get_tag != NULL) + if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu", - TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n" "alua_tg_pt_status=0x%02x\n", @@ -954,7 +962,7 @@ static int core_alua_update_tpg_secondary_metadata( port->sep_tg_pt_secondary_stat); snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u", - TPG_TFO(se_tpg)->get_fabric_name(), wwn, + se_tpg->se_tpg_tfo->get_fabric_name(), wwn, port->sep_lun->unpacked_lun); return core_alua_write_tpg_metadata(path, md_buf, len); @@ -977,7 +985,7 @@ static int core_alua_set_tg_pt_secondary_state( spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); printk(KERN_ERR "Unable to complete secondary state" " transition\n"); - return -1; + return -EINVAL; } trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; /* @@ -1015,7 +1023,7 @@ static int core_alua_set_tg_pt_secondary_state( if (!(md_buf)) { printk(KERN_ERR "Unable to allocate md_buf for" " secondary ALUA access metadata\n"); - return -1; + return -ENOMEM; } mutex_lock(&port->sep_tg_pt_md_mutex); core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port, @@ -1038,15 +1046,15 @@ core_alua_allocate_lu_gp(const char *name, int def_group) printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n"); return ERR_PTR(-ENOMEM); } - INIT_LIST_HEAD(&lu_gp->lu_gp_list); + INIT_LIST_HEAD(&lu_gp->lu_gp_node); INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list); spin_lock_init(&lu_gp->lu_gp_lock); atomic_set(&lu_gp->lu_gp_ref_cnt, 0); if (def_group) { - lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++; + lu_gp->lu_gp_id = alua_lu_gps_counter++; lu_gp->lu_gp_valid_id = 1; - se_global->alua_lu_gps_count++; + alua_lu_gps_count++; } return lu_gp; @@ -1062,22 +1070,22 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) if (lu_gp->lu_gp_valid_id) { printk(KERN_WARNING "ALUA LU Group already has a valid ID," " ignoring request\n"); - return -1; + return -EINVAL; } - spin_lock(&se_global->lu_gps_lock); - if (se_global->alua_lu_gps_count == 0x0000ffff) { - printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:" + spin_lock(&lu_gps_lock); + if (alua_lu_gps_count == 0x0000ffff) { + printk(KERN_ERR "Maximum ALUA alua_lu_gps_count:" " 0x0000ffff reached\n"); - spin_unlock(&se_global->lu_gps_lock); + spin_unlock(&lu_gps_lock); kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); - return -1; + return -ENOSPC; } again: lu_gp_id_tmp = (lu_gp_id != 0) ? 
lu_gp_id : - se_global->alua_lu_gps_counter++; + alua_lu_gps_counter++; - list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) { + list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { if (!(lu_gp_id)) goto again; @@ -1085,16 +1093,16 @@ again: printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu" " already exists, ignoring request\n", lu_gp_id); - spin_unlock(&se_global->lu_gps_lock); - return -1; + spin_unlock(&lu_gps_lock); + return -EINVAL; } } lu_gp->lu_gp_id = lu_gp_id_tmp; lu_gp->lu_gp_valid_id = 1; - list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list); - se_global->alua_lu_gps_count++; - spin_unlock(&se_global->lu_gps_lock); + list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list); + alua_lu_gps_count++; + spin_unlock(&lu_gps_lock); return 0; } @@ -1130,11 +1138,11 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) * no associations can be made while we are releasing * struct t10_alua_lu_gp. */ - spin_lock(&se_global->lu_gps_lock); + spin_lock(&lu_gps_lock); atomic_set(&lu_gp->lu_gp_shutdown, 1); - list_del(&lu_gp->lu_gp_list); - se_global->alua_lu_gps_count--; - spin_unlock(&se_global->lu_gps_lock); + list_del(&lu_gp->lu_gp_node); + alua_lu_gps_count--; + spin_unlock(&lu_gps_lock); /* * Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name() * in target_core_configfs.c:target_core_store_alua_lu_gp() to be @@ -1165,9 +1173,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) * we want to re-assocate a given lu_gp_mem with default_lu_gp. */ spin_lock(&lu_gp_mem->lu_gp_mem_lock); - if (lu_gp != se_global->default_lu_gp) + if (lu_gp != default_lu_gp) __core_alua_attach_lu_gp_mem(lu_gp_mem, - se_global->default_lu_gp); + default_lu_gp); else lu_gp_mem->lu_gp = NULL; spin_unlock(&lu_gp_mem->lu_gp_mem_lock); @@ -1182,7 +1190,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp) void core_alua_free_lu_gp_mem(struct se_device *dev) { struct se_subsystem_dev *su_dev = dev->se_sub_dev; - struct t10_alua *alua = T10_ALUA(su_dev); + struct t10_alua *alua = &su_dev->t10_alua; struct t10_alua_lu_gp *lu_gp; struct t10_alua_lu_gp_member *lu_gp_mem; @@ -1218,27 +1226,27 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) struct t10_alua_lu_gp *lu_gp; struct config_item *ci; - spin_lock(&se_global->lu_gps_lock); - list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) { + spin_lock(&lu_gps_lock); + list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { if (!(lu_gp->lu_gp_valid_id)) continue; ci = &lu_gp->lu_gp_group.cg_item; if (!(strcmp(config_item_name(ci), name))) { atomic_inc(&lu_gp->lu_gp_ref_cnt); - spin_unlock(&se_global->lu_gps_lock); + spin_unlock(&lu_gps_lock); return lu_gp; } } - spin_unlock(&se_global->lu_gps_lock); + spin_unlock(&lu_gps_lock); return NULL; } void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp) { - spin_lock(&se_global->lu_gps_lock); + spin_lock(&lu_gps_lock); atomic_dec(&lu_gp->lu_gp_ref_cnt); - spin_unlock(&se_global->lu_gps_lock); + spin_unlock(&lu_gps_lock); } /* @@ -1304,14 +1312,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS; if (def_group) { - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); tg_pt_gp->tg_pt_gp_id = - T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; + su_dev->t10_alua.alua_tg_pt_gps_counter++; tg_pt_gp->tg_pt_gp_valid_id = 1; - T10_ALUA(su_dev)->alua_tg_pt_gps_count++; + 
su_dev->t10_alua.alua_tg_pt_gps_count++; list_add_tail(&tg_pt_gp->tg_pt_gp_list, - &T10_ALUA(su_dev)->tg_pt_gps_list); - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + &su_dev->t10_alua.tg_pt_gps_list); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); } return tg_pt_gp; @@ -1330,22 +1338,22 @@ int core_alua_set_tg_pt_gp_id( if (tg_pt_gp->tg_pt_gp_valid_id) { printk(KERN_WARNING "ALUA TG PT Group already has a valid ID," " ignoring request\n"); - return -1; + return -EINVAL; } - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); - if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) { + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); + if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:" " 0x0000ffff reached\n"); - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); - return -1; + return -ENOSPC; } again: tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id : - T10_ALUA(su_dev)->alua_tg_pt_gps_counter++; + su_dev->t10_alua.alua_tg_pt_gps_counter++; - list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list, + list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { if (!(tg_pt_gp_id)) @@ -1353,17 +1361,17 @@ again: printk(KERN_ERR "ALUA Target Port Group ID: %hu already" " exists, ignoring request\n", tg_pt_gp_id); - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); - return -1; + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); + return -EINVAL; } } tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; tg_pt_gp->tg_pt_gp_valid_id = 1; list_add_tail(&tg_pt_gp->tg_pt_gp_list, - &T10_ALUA(su_dev)->tg_pt_gps_list); - T10_ALUA(su_dev)->alua_tg_pt_gps_count++; - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + &su_dev->t10_alua.tg_pt_gps_list); + su_dev->t10_alua.alua_tg_pt_gps_count++; + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); return 0; } @@ -1403,10 +1411,10 @@ void core_alua_free_tg_pt_gp( * no assications *OR* explict ALUA via SET_TARGET_PORT_GROUPS * can be made while we are releasing struct t10_alua_tg_pt_gp. */ - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); list_del(&tg_pt_gp->tg_pt_gp_list); - T10_ALUA(su_dev)->alua_tg_pt_gps_counter--; - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + su_dev->t10_alua.alua_tg_pt_gps_counter--; + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); /* * Allow a struct t10_alua_tg_pt_gp_member * referenced by * core_alua_get_tg_pt_gp_by_name() in @@ -1438,9 +1446,9 @@ void core_alua_free_tg_pt_gp( * default_tg_pt_gp. 
*/ spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); - if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) { + if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) { __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, - T10_ALUA(su_dev)->default_tg_pt_gp); + su_dev->t10_alua.default_tg_pt_gp); } else tg_pt_gp_mem->tg_pt_gp = NULL; spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -1455,7 +1463,7 @@ void core_alua_free_tg_pt_gp( void core_alua_free_tg_pt_gp_mem(struct se_port *port) { struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; - struct t10_alua *alua = T10_ALUA(su_dev); + struct t10_alua *alua = &su_dev->t10_alua; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; @@ -1493,19 +1501,19 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( struct t10_alua_tg_pt_gp *tg_pt_gp; struct config_item *ci; - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); - list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list, + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (!(tg_pt_gp->tg_pt_gp_valid_id)) continue; ci = &tg_pt_gp->tg_pt_gp_group.cg_item; if (!(strcmp(config_item_name(ci), name))) { atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); return tg_pt_gp; } } - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); return NULL; } @@ -1515,9 +1523,9 @@ static void core_alua_put_tg_pt_gp_from_name( { struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev; - spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); - spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock); + spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); } /* @@ -1555,7 +1563,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) { struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev; struct config_item *tg_pt_ci; - struct t10_alua *alua = T10_ALUA(su_dev); + struct t10_alua *alua = &su_dev->t10_alua; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; ssize_t len = 0; @@ -1605,10 +1613,10 @@ ssize_t core_alua_store_tg_pt_gp_info( tpg = port->sep_tpg; lun = port->sep_lun; - if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { + if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for" - " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg), - TPG_TFO(tpg)->tpg_get_tag(tpg), + " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item)); return -EINVAL; } @@ -1654,8 +1662,8 @@ ssize_t core_alua_store_tg_pt_gp_info( " %s/tpgt_%hu/%s from ALUA Target Port Group:" " alua/%s, ID: %hu back to" " default_tg_pt_gp\n", - TPG_TFO(tpg)->tpg_get_wwn(tpg), - TPG_TFO(tpg)->tpg_get_tag(tpg), + tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item), config_item_name( &tg_pt_gp->tg_pt_gp_group.cg_item), @@ -1663,7 +1671,7 @@ ssize_t core_alua_store_tg_pt_gp_info( __core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp); __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, - T10_ALUA(su_dev)->default_tg_pt_gp); + su_dev->t10_alua.default_tg_pt_gp); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); return count; @@ -1681,8 +1689,8 @@ ssize_t 
core_alua_store_tg_pt_gp_info( spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" " Target Port Group: alua/%s, ID: %hu\n", (move) ? - "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg), - TPG_TFO(tpg)->tpg_get_tag(tpg), + "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item), config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item), tg_pt_gp_new->tg_pt_gp_id); @@ -1939,7 +1947,7 @@ ssize_t core_alua_store_secondary_write_metadata( int core_setup_alua(struct se_device *dev, int force_pt) { struct se_subsystem_dev *su_dev = dev->se_sub_dev; - struct t10_alua *alua = T10_ALUA(su_dev); + struct t10_alua *alua = &su_dev->t10_alua; struct t10_alua_lu_gp_member *lu_gp_mem; /* * If this device is from Target_Core_Mod/pSCSI, use the ALUA logic @@ -1947,44 +1955,44 @@ int core_setup_alua(struct se_device *dev, int force_pt) * cause a problem because libata and some SATA RAID HBAs appear * under Linux/SCSI, but emulate SCSI logic themselves. */ - if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && - !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) { + if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && + !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) { alua->alua_type = SPC_ALUA_PASSTHROUGH; alua->alua_state_check = &core_alua_state_check_nop; printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" - " emulation\n", TRANSPORT(dev)->name); + " emulation\n", dev->transport->name); return 0; } /* * If SPC-3 or above is reported by real or emulated struct se_device, * use emulated ALUA. */ - if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { + if (dev->transport->get_device_rev(dev) >= SCSI_3) { printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3" - " device\n", TRANSPORT(dev)->name); + " device\n", dev->transport->name); /* * Associate this struct se_device with the default ALUA * LUN Group. 
*/ lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); - if (IS_ERR(lu_gp_mem) || !lu_gp_mem) - return -1; + if (IS_ERR(lu_gp_mem)) + return PTR_ERR(lu_gp_mem); alua->alua_type = SPC3_ALUA_EMULATED; alua->alua_state_check = &core_alua_state_check; spin_lock(&lu_gp_mem->lu_gp_mem_lock); __core_alua_attach_lu_gp_mem(lu_gp_mem, - se_global->default_lu_gp); + default_lu_gp); spin_unlock(&lu_gp_mem->lu_gp_mem_lock); printk(KERN_INFO "%s: Adding to default ALUA LU Group:" " core/alua/lu_gps/default_lu_gp\n", - TRANSPORT(dev)->name); + dev->transport->name); } else { alua->alua_type = SPC2_ALUA_DISABLED; alua->alua_state_check = &core_alua_state_check_nop; printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2" - " device\n", TRANSPORT(dev)->name); + " device\n", dev->transport->name); } return 0; diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 7f19c8b7b84c..7d9ccf3aa9c3 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -64,8 +64,8 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf) static int target_emulate_inquiry_std(struct se_cmd *cmd) { - struct se_lun *lun = SE_LUN(cmd); - struct se_device *dev = SE_DEV(cmd); + struct se_lun *lun = cmd->se_lun; + struct se_device *dev = cmd->se_lun->lun_se_dev; unsigned char *buf = cmd->t_task->t_task_buf; /* @@ -75,7 +75,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) if (cmd->data_length < 6) { printk(KERN_ERR "SCSI Inquiry payload length: %u" " too small for EVPD=0\n", cmd->data_length); - return -1; + return -EINVAL; } buf[0] = dev->transport->get_device_type(dev); @@ -86,7 +86,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) /* * Enable SCCS and TPGS fields for Emulated ALUA */ - if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED) + if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) target_fill_alua_data(lun->lun_sep, buf); if (cmd->data_length < 8) { @@ -107,9 +107,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd) snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); snprintf((unsigned char *)&buf[16], 16, "%s", - &DEV_T10_WWN(dev)->model[0]); + &dev->se_sub_dev->t10_wwn.model[0]); snprintf((unsigned char *)&buf[32], 4, "%s", - &DEV_T10_WWN(dev)->revision[0]); + &dev->se_sub_dev->t10_wwn.revision[0]); buf[4] = 31; /* Set additional length to 31 */ return 0; } @@ -128,7 +128,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) * Registered Extended LUN WWN has been set via ConfigFS * during device creation/restart. 
*/ - if (SE_DEV(cmd)->se_sub_dev->su_dev_flags & + if (cmd->se_lun->lun_se_dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL) { buf[3] = 3; buf[5] = 0x80; @@ -143,7 +143,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; u16 len = 0; buf[1] = 0x80; @@ -152,7 +152,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) u32 unit_serial_len; unit_serial_len = - strlen(&DEV_T10_WWN(dev)->unit_serial[0]); + strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); unit_serial_len++; /* For NULL Terminator */ if (((len + 4) + unit_serial_len) > cmd->data_length) { @@ -162,7 +162,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) return 0; } len += sprintf((unsigned char *)&buf[4], "%s", - &DEV_T10_WWN(dev)->unit_serial[0]); + &dev->se_sub_dev->t10_wwn.unit_serial[0]); len++; /* Extra Byte for NULL Terminator */ buf[3] = len; } @@ -176,15 +176,15 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = SE_DEV(cmd); - struct se_lun *lun = SE_LUN(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_lun *lun = cmd->se_lun; struct se_port *port = NULL; struct se_portal_group *tpg = NULL; struct t10_alua_lu_gp_member *lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; unsigned char binary, binary_new; - unsigned char *prod = &DEV_T10_WWN(dev)->model[0]; + unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0]; u32 prod_len; u32 unit_serial_len, off = 0; int i; @@ -238,11 +238,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION */ binary = transport_asciihex_to_binaryhex( - &DEV_T10_WWN(dev)->unit_serial[0]); + &dev->se_sub_dev->t10_wwn.unit_serial[0]); buf[off++] |= (binary & 0xf0) >> 4; for (i = 0; i < 24; i += 2) { binary_new = transport_asciihex_to_binaryhex( - &DEV_T10_WWN(dev)->unit_serial[i+2]); + &dev->se_sub_dev->t10_wwn.unit_serial[i+2]); buf[off] = (binary & 0x0f) << 4; buf[off++] |= (binary_new & 0xf0) >> 4; binary = binary_new; @@ -263,7 +263,7 @@ check_t10_vend_desc: if (dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL) { unit_serial_len = - strlen(&DEV_T10_WWN(dev)->unit_serial[0]); + strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]); unit_serial_len++; /* For NULL Terminator */ if ((len + (id_len + 4) + @@ -274,7 +274,7 @@ check_t10_vend_desc: } id_len += sprintf((unsigned char *)&buf[off+12], "%s:%s", prod, - &DEV_T10_WWN(dev)->unit_serial[0]); + &dev->se_sub_dev->t10_wwn.unit_serial[0]); } buf[off] = 0x2; /* ASCII */ buf[off+1] = 0x1; /* T10 Vendor ID */ @@ -312,7 +312,7 @@ check_port: goto check_tpgi; } buf[off] = - (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); buf[off++] |= 0x1; /* CODE SET == Binary */ buf[off] = 0x80; /* Set PIV=1 */ /* Set ASSOICATION == target port: 01b */ @@ -335,7 +335,7 @@ check_port: * section 7.5.1 Table 362 */ check_tpgi: - if (T10_ALUA(dev->se_sub_dev)->alua_type != + if (dev->se_sub_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) goto check_scsi_name; @@ -357,7 +357,7 @@ check_tpgi: spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); buf[off] = - (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); 
buf[off++] |= 0x1; /* CODE SET == Binary */ buf[off] = 0x80; /* Set PIV=1 */ /* Set ASSOICATION == target port: 01b */ @@ -409,7 +409,7 @@ check_lu_gp: * section 7.5.1 Table 362 */ check_scsi_name: - scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg)); + scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg)); /* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */ scsi_name_len += 10; /* Check for 4-byte padding */ @@ -424,7 +424,7 @@ check_scsi_name: goto set_len; } buf[off] = - (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4); + (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4); buf[off++] |= 0x3; /* CODE SET == UTF-8 */ buf[off] = 0x80; /* Set PIV=1 */ /* Set ASSOICATION == target port: 01b */ @@ -438,9 +438,9 @@ check_scsi_name: * Target Port, this means ",t,0x in * UTF-8 encoding. */ - tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); + tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x", - TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt); + tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt); scsi_name_len += 1 /* Include NULL terminator */; /* * The null-terminated, null-padded (see 4.4.2) SCSI @@ -477,7 +477,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) buf[5] = 0x07; /* If WriteCache emulation is enabled, set V_SUP */ - if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0) + if (cmd->se_lun->lun_se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) buf[6] = 0x01; return 0; } @@ -486,7 +486,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; int have_tp = 0; /* @@ -494,14 +494,14 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) * emulate_tpu=1 or emulate_tpws=1 we will be expect a * different page length for Thin Provisioning. 
*/ - if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) have_tp = 1; if (cmd->data_length < (0x10 + 4)) { printk(KERN_INFO "Received data_length: %u" " too small for EVPD 0xb0\n", cmd->data_length); - return -1; + return -EINVAL; } if (have_tp && cmd->data_length < (0x3c + 4)) { @@ -523,12 +523,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set MAXIMUM TRANSFER LENGTH */ - put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]); + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]); /* * Set OPTIMAL TRANSFER LENGTH */ - put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]); + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]); /* * Exit now if we don't support TP or the initiator sent a too @@ -540,25 +540,25 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set MAXIMUM UNMAP LBA COUNT */ - put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]); + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]); /* * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT */ - put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count, + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count, &buf[24]); /* * Set OPTIMAL UNMAP GRANULARITY */ - put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]); + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]); /* * UNMAP GRANULARITY ALIGNMENT */ - put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment, + put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment, &buf[32]); - if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0) + if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0) buf[32] |= 0x80; /* Set the UGAVALID bit */ return 0; @@ -568,7 +568,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; /* * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: @@ -602,7 +602,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) * the UNMAP command (see 5.25). A TPU bit set to zero indicates * that the device server does not support the UNMAP command. */ - if (DEV_ATTRIB(dev)->emulate_tpu != 0) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0) buf[5] = 0x80; /* @@ -611,7 +611,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) * A TPWS bit set to zero indicates that the device server does not * support the use of the WRITE SAME (16) command to unmap LBAs. 
*/ - if (DEV_ATTRIB(dev)->emulate_tpws != 0) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0) buf[5] |= 0x40; return 0; @@ -620,7 +620,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_inquiry(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; unsigned char *buf = cmd->t_task->t_task_buf; unsigned char *cdb = cmd->t_task->t_task_cdb; @@ -637,7 +637,7 @@ target_emulate_inquiry(struct se_cmd *cmd) if (cmd->data_length < 4) { printk(KERN_ERR "SCSI Inquiry payload length: %u" " too small for EVPD=1\n", cmd->data_length); - return -1; + return -EINVAL; } buf[0] = dev->transport->get_device_type(dev); @@ -656,7 +656,7 @@ target_emulate_inquiry(struct se_cmd *cmd) return target_emulate_evpd_b2(cmd, buf); default: printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); - return -1; + return -EINVAL; } return 0; @@ -665,7 +665,7 @@ target_emulate_inquiry(struct se_cmd *cmd) static int target_emulate_readcapacity(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; unsigned char *buf = cmd->t_task->t_task_buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); u32 blocks; @@ -679,14 +679,14 @@ target_emulate_readcapacity(struct se_cmd *cmd) buf[1] = (blocks >> 16) & 0xff; buf[2] = (blocks >> 8) & 0xff; buf[3] = blocks & 0xff; - buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; - buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; - buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; - buf[7] = DEV_ATTRIB(dev)->block_size & 0xff; + buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; + buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; + buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; + buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; /* * Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16 */ - if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) put_unaligned_be32(0xFFFFFFFF, &buf[0]); return 0; @@ -695,7 +695,7 @@ target_emulate_readcapacity(struct se_cmd *cmd) static int target_emulate_readcapacity_16(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; unsigned char *buf = cmd->t_task->t_task_buf; unsigned long long blocks = dev->transport->get_blocks(dev); @@ -707,15 +707,15 @@ target_emulate_readcapacity_16(struct se_cmd *cmd) buf[5] = (blocks >> 16) & 0xff; buf[6] = (blocks >> 8) & 0xff; buf[7] = blocks & 0xff; - buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff; - buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff; - buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff; - buf[11] = DEV_ATTRIB(dev)->block_size & 0xff; + buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff; + buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff; + buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff; + buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff; /* * Set Thin Provisioning Enable bit following sbc3r22 in section * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled. 
*/ - if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) buf[14] = 0x80; return 0; @@ -765,8 +765,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p) * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless * to the number of commands completed with one of those status codes. */ - p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 : - (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; + p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 : + (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; /* * From spc4r17, section 7.4.6 Control mode Page * @@ -779,7 +779,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p) * which the command was received shall be completed with TASK ABORTED * status (see SAM-4). */ - p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00; + p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00; p[8] = 0xff; p[9] = 0xff; p[11] = 30; @@ -792,7 +792,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p) { p[0] = 0x08; p[1] = 0x12; - if (DEV_ATTRIB(dev)->emulate_write_cache > 0) + if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) p[2] = 0x04; /* Write Cache Enable */ p[12] = 0x20; /* Disabled Read Ahead */ @@ -830,7 +830,7 @@ target_modesense_dpofua(unsigned char *buf, int type) static int target_emulate_modesense(struct se_cmd *cmd, int ten) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; char *cdb = cmd->t_task->t_task_cdb; unsigned char *rbuf = cmd->t_task->t_task_buf; int type = dev->transport->get_device_type(dev); @@ -867,13 +867,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) buf[0] = (offset >> 8) & 0xff; buf[1] = offset & 0xff; - if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || + if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || (cmd->se_deve && (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) target_modesense_write_protect(&buf[3], type); - if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && - (DEV_ATTRIB(dev)->emulate_fua_write > 0)) + if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && + (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) target_modesense_dpofua(&buf[3], type); if ((offset + 2) > cmd->data_length) @@ -883,13 +883,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) offset -= 1; buf[0] = offset & 0xff; - if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || + if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || (cmd->se_deve && (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY))) target_modesense_write_protect(&buf[2], type); - if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) && - (DEV_ATTRIB(dev)->emulate_fua_write > 0)) + if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) && + (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0)) target_modesense_dpofua(&buf[2], type); if ((offset + 1) > cmd->data_length) @@ -963,8 +963,8 @@ target_emulate_request_sense(struct se_cmd *cmd) static int target_emulate_unmap(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); - struct se_device *dev = SE_DEV(cmd); + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = cmd->se_lun->lun_se_dev; unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; sector_t lba; @@ -991,7 
+991,7 @@ target_emulate_unmap(struct se_task *task) if (ret < 0) { printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", ret); - return -1; + return ret; } ptr += 16; @@ -1010,13 +1010,13 @@ target_emulate_unmap(struct se_task *task) static int target_emulate_write_same(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); - struct se_device *dev = SE_DEV(cmd); + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = cmd->se_lun->lun_se_dev; sector_t lba = cmd->t_task->t_task_lba; unsigned int range; int ret; - range = (cmd->data_length / DEV_ATTRIB(dev)->block_size); + range = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n", (unsigned long long)lba, range); @@ -1024,7 +1024,7 @@ target_emulate_write_same(struct se_task *task) ret = dev->transport->do_discard(dev, lba, range); if (ret < 0) { printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); - return -1; + return ret; } task->task_scsi_status = GOOD; @@ -1035,8 +1035,8 @@ target_emulate_write_same(struct se_task *task) int transport_emulate_control_cdb(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); - struct se_device *dev = SE_DEV(cmd); + struct se_cmd *cmd = task->task_se_cmd; + struct se_device *dev = cmd->se_lun->lun_se_dev; unsigned short service_action; int ret = 0; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index c6140004307b..64418efa671b 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -52,6 +53,8 @@ #include "target_core_rd.h" #include "target_core_stat.h" +extern struct t10_alua_lu_gp *default_lu_gp; + static struct list_head g_tf_list; static struct mutex g_tf_lock; @@ -61,6 +64,13 @@ struct target_core_configfs_attribute { ssize_t (*store)(void *, const char *, size_t); }; +static struct config_group target_core_hbagroup; +static struct config_group alua_group; +static struct config_group alua_lu_gps_group; + +static DEFINE_SPINLOCK(se_device_lock); +static LIST_HEAD(se_dev_list); + static inline struct se_hba * item_to_hba(struct config_item *item) { @@ -298,21 +308,21 @@ struct target_fabric_configfs *target_fabric_configfs_init( if (!(fabric_mod)) { printk(KERN_ERR "Missing struct module *fabric_mod pointer\n"); - return NULL; + return ERR_PTR(-EINVAL); } if (!(name)) { printk(KERN_ERR "Unable to locate passed fabric name\n"); - return NULL; + return ERR_PTR(-EINVAL); } if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" "_NAME_SIZE\n", name); - return NULL; + return ERR_PTR(-EINVAL); } tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); if (!(tf)) - return NULL; + return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&tf->tf_list); atomic_set(&tf->tf_access_cnt, 0); @@ -591,7 +601,6 @@ void target_fabric_configfs_deregister( printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" ">>>>>\n"); - return; } EXPORT_SYMBOL(target_fabric_configfs_deregister); @@ -616,7 +625,8 @@ static ssize_t target_core_dev_show_attr_##_name( \ spin_unlock(&se_dev->se_dev_lock); \ return -ENODEV; \ } \ - rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \ + rb = snprintf(page, PAGE_SIZE, "%u\n", \ + (u32)dev->se_sub_dev->se_dev_attrib._name); \ spin_unlock(&se_dev->se_dev_lock); \ \ return rb; \ @@ -1078,7 +1088,7 @@ static ssize_t target_core_dev_pr_show_spc3_res( 
PR_REG_ISID_ID_LEN); *len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n", - TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), + se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); spin_unlock(&dev->dev_reservation_lock); @@ -1100,7 +1110,7 @@ static ssize_t target_core_dev_pr_show_spc2_res( return *len; } *len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n", - TPG_TFO(se_nacl->se_tpg)->get_fabric_name(), + se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), se_nacl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -1116,7 +1126,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder( if (!(su_dev->se_dev_ptr)) return -ENODEV; - switch (T10_RES(su_dev)->res_type) { + switch (su_dev->t10_pr.res_type) { case SPC3_PERSISTENT_RESERVATIONS: target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr, page, &len); @@ -1153,7 +1163,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( if (!(dev)) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return len; spin_lock(&dev->dev_reservation_lock); @@ -1190,10 +1200,10 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation( if (!(su_dev->se_dev_ptr)) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; - return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation); + return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation); } SE_DEV_PR_ATTR_RO(res_pr_generation); @@ -1217,7 +1227,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( if (!(dev)) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return len; spin_lock(&dev->dev_reservation_lock); @@ -1230,7 +1240,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( se_nacl = pr_reg->pr_reg_nacl; se_tpg = se_nacl->se_tpg; lun = pr_reg->pr_reg_tg_pt_lun; - tfo = TPG_TFO(se_tpg); + tfo = se_tpg->se_tpg_tfo; len += sprintf(page+len, "SPC-3 Reservation: %s" " Target Node Endpoint: %s\n", tfo->get_fabric_name(), @@ -1264,13 +1274,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( if (!(su_dev->se_dev_ptr)) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return len; len += sprintf(page+len, "SPC-3 PR Registrations:\n"); - spin_lock(&T10_RES(su_dev)->registration_lock); - list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + spin_lock(&su_dev->t10_pr.registration_lock); + list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, pr_reg_list) { memset(buf, 0, 384); @@ -1290,7 +1300,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( len += sprintf(page+len, "%s", buf); reg_count++; } - spin_unlock(&T10_RES(su_dev)->registration_lock); + spin_unlock(&su_dev->t10_pr.registration_lock); if (!(reg_count)) len += sprintf(page+len, "None\n"); @@ -1315,7 +1325,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type( if (!(dev)) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return len; spin_lock(&dev->dev_reservation_lock); @@ -1346,7 +1356,7 @@ static ssize_t target_core_dev_pr_show_attr_res_type( if (!(su_dev->se_dev_ptr)) 
return -ENODEV; - switch (T10_RES(su_dev)->res_type) { + switch (su_dev->t10_pr.res_type) { case SPC3_PERSISTENT_RESERVATIONS: len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); break; @@ -1377,11 +1387,11 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( if (!(su_dev->se_dev_ptr)) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; return sprintf(page, "APTPL Bit Status: %s\n", - (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled"); + (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled"); } SE_DEV_PR_ATTR_RO(res_aptpl_active); @@ -1396,7 +1406,7 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata( if (!(su_dev->se_dev_ptr)) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; return sprintf(page, "Ready to process PR APTPL metadata..\n"); @@ -1448,7 +1458,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( if (!(dev)) return -ENODEV; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; if (atomic_read(&dev->dev_export_obj.obj_access_count)) { @@ -1594,7 +1604,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( goto out; } - ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key, + ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key, i_port, isid, mapped_lun, t_port, tpgt, target_lun, res_holder, all_tg_pt, type); out: @@ -1842,7 +1852,7 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page) if (!(dev)) return -ENODEV; - if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) + if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) return len; lu_gp_mem = dev->dev_alua_lu_gp_mem; @@ -1881,7 +1891,7 @@ static ssize_t target_core_store_alua_lu_gp( if (!(dev)) return -ENODEV; - if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) { + if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&su_dev->se_dev_group.cg_item)); @@ -2557,9 +2567,9 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members( lun = port->sep_lun; cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu" - "/%s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_wwn(tpg), - TPG_TFO(tpg)->tpg_get_tag(tpg), + "/%s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item)); cur_len++; /* Extra byte for NULL terminator */ @@ -2748,17 +2758,17 @@ static struct config_group *target_core_make_subdev( " struct se_subsystem_dev\n"); goto unlock; } - INIT_LIST_HEAD(&se_dev->g_se_dev_list); + INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); - INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); - INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); - spin_lock_init(&se_dev->t10_reservation.registration_lock); - spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); + INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); + INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); + spin_lock_init(&se_dev->t10_pr.registration_lock); + spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); 
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); spin_lock_init(&se_dev->se_dev_lock); - se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; + se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; se_dev->t10_wwn.t10_sub_dev = se_dev; se_dev->t10_alua.t10_sub_dev = se_dev; se_dev->se_dev_attrib.da_sub_dev = se_dev; @@ -2784,9 +2794,9 @@ static struct config_group *target_core_make_subdev( " from allocate_virtdevice()\n"); goto out; } - spin_lock(&se_global->g_device_lock); - list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list); - spin_unlock(&se_global->g_device_lock); + spin_lock(&se_device_lock); + list_add_tail(&se_dev->se_dev_node, &se_dev_list); + spin_unlock(&se_device_lock); config_group_init_type_name(&se_dev->se_dev_group, name, &target_core_dev_cit); @@ -2814,7 +2824,7 @@ static struct config_group *target_core_make_subdev( if (!(tg_pt_gp)) goto out; - tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; + tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!(tg_pt_gp_cg->default_groups)) { @@ -2827,11 +2837,11 @@ static struct config_group *target_core_make_subdev( "default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit); tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group; tg_pt_gp_cg->default_groups[1] = NULL; - T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp; + se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp; /* * Add core/$HBA/$DEV/statistics/ default groups */ - dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; + dev_stat_grp = &se_dev->dev_stat_grps.stat_group; dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, GFP_KERNEL); if (!dev_stat_grp->default_groups) { @@ -2846,9 +2856,9 @@ static struct config_group *target_core_make_subdev( mutex_unlock(&hba->hba_access_mutex); return &se_dev->se_dev_group; out: - if (T10_ALUA(se_dev)->default_tg_pt_gp) { - core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); - T10_ALUA(se_dev)->default_tg_pt_gp = NULL; + if (se_dev->t10_alua.default_tg_pt_gp) { + core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp); + se_dev->t10_alua.default_tg_pt_gp = NULL; } if (dev_stat_grp) kfree(dev_stat_grp->default_groups); @@ -2881,11 +2891,11 @@ static void target_core_drop_subdev( mutex_lock(&hba->hba_access_mutex); t = hba->transport; - spin_lock(&se_global->g_device_lock); - list_del(&se_dev->g_se_dev_list); - spin_unlock(&se_global->g_device_lock); + spin_lock(&se_device_lock); + list_del(&se_dev->se_dev_node); + spin_unlock(&se_device_lock); - dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group; + dev_stat_grp = &se_dev->dev_stat_grps.stat_group; for (i = 0; dev_stat_grp->default_groups[i]; i++) { df_item = &dev_stat_grp->default_groups[i]->cg_item; dev_stat_grp->default_groups[i] = NULL; @@ -2893,7 +2903,7 @@ static void target_core_drop_subdev( } kfree(dev_stat_grp->default_groups); - tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group; + tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) { df_item = &tg_pt_gp_cg->default_groups[i]->cg_item; tg_pt_gp_cg->default_groups[i] = NULL; @@ -2904,7 +2914,7 @@ static void target_core_drop_subdev( * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp * directly from target_core_alua_tg_pt_gp_release(). 
*/ - T10_ALUA(se_dev)->default_tg_pt_gp = NULL; + se_dev->t10_alua.default_tg_pt_gp = NULL; dev_cg = &se_dev->se_dev_group; for (i = 0; dev_cg->default_groups[i]; i++) { @@ -3130,10 +3140,9 @@ static int __init target_core_init_configfs(void) INIT_LIST_HEAD(&g_tf_list); mutex_init(&g_tf_lock); - init_scsi_index_table(); - ret = init_se_global(); + ret = init_se_kmem_caches(); if (ret < 0) - return -1; + return ret; /* * Create $CONFIGFS/target/core default group for HBA <-> Storage Object * and ALUA Logical Unit Group and Target Port Group infrastructure. @@ -3146,29 +3155,29 @@ static int __init target_core_init_configfs(void) goto out_global; } - config_group_init_type_name(&se_global->target_core_hbagroup, + config_group_init_type_name(&target_core_hbagroup, "core", &target_core_cit); - target_cg->default_groups[0] = &se_global->target_core_hbagroup; + target_cg->default_groups[0] = &target_core_hbagroup; target_cg->default_groups[1] = NULL; /* * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/ */ - hba_cg = &se_global->target_core_hbagroup; + hba_cg = &target_core_hbagroup; hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!(hba_cg->default_groups)) { printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); goto out_global; } - config_group_init_type_name(&se_global->alua_group, + config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit); - hba_cg->default_groups[0] = &se_global->alua_group; + hba_cg->default_groups[0] = &alua_group; hba_cg->default_groups[1] = NULL; /* * Add ALUA Logical Unit Group and Target Port Group ConfigFS * groups under /sys/kernel/config/target/core/alua/ */ - alua_cg = &se_global->alua_group; + alua_cg = &alua_group; alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!(alua_cg->default_groups)) { @@ -3176,9 +3185,9 @@ static int __init target_core_init_configfs(void) goto out_global; } - config_group_init_type_name(&se_global->alua_lu_gps_group, + config_group_init_type_name(&alua_lu_gps_group, "lu_gps", &target_core_alua_lu_gps_cit); - alua_cg->default_groups[0] = &se_global->alua_lu_gps_group; + alua_cg->default_groups[0] = &alua_lu_gps_group; alua_cg->default_groups[1] = NULL; /* * Add core/alua/lu_gps/default_lu_gp @@ -3187,7 +3196,7 @@ static int __init target_core_init_configfs(void) if (IS_ERR(lu_gp)) goto out_global; - lu_gp_cg = &se_global->alua_lu_gps_group; + lu_gp_cg = &alua_lu_gps_group; lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!(lu_gp_cg->default_groups)) { @@ -3199,7 +3208,7 @@ static int __init target_core_init_configfs(void) &target_core_alua_lu_gp_cit); lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group; lu_gp_cg->default_groups[1] = NULL; - se_global->default_lu_gp = lu_gp; + default_lu_gp = lu_gp; /* * Register the target_core_mod subsystem with configfs. 
*/ @@ -3229,9 +3238,9 @@ out: core_dev_release_virtual_lun0(); rd_module_exit(); out_global: - if (se_global->default_lu_gp) { - core_alua_free_lu_gp(se_global->default_lu_gp); - se_global->default_lu_gp = NULL; + if (default_lu_gp) { + core_alua_free_lu_gp(default_lu_gp); + default_lu_gp = NULL; } if (lu_gp_cg) kfree(lu_gp_cg->default_groups); @@ -3240,8 +3249,8 @@ out_global: if (hba_cg) kfree(hba_cg->default_groups); kfree(target_cg->default_groups); - release_se_global(); - return -1; + release_se_kmem_caches(); + return ret; } static void __exit target_core_exit_configfs(void) @@ -3251,10 +3260,9 @@ static void __exit target_core_exit_configfs(void) struct config_item *item; int i; - se_global->in_shutdown = 1; subsys = target_core_subsystem[0]; - lu_gp_cg = &se_global->alua_lu_gps_group; + lu_gp_cg = &alua_lu_gps_group; for (i = 0; lu_gp_cg->default_groups[i]; i++) { item = &lu_gp_cg->default_groups[i]->cg_item; lu_gp_cg->default_groups[i] = NULL; @@ -3263,7 +3271,7 @@ static void __exit target_core_exit_configfs(void) kfree(lu_gp_cg->default_groups); lu_gp_cg->default_groups = NULL; - alua_cg = &se_global->alua_group; + alua_cg = &alua_group; for (i = 0; alua_cg->default_groups[i]; i++) { item = &alua_cg->default_groups[i]->cg_item; alua_cg->default_groups[i] = NULL; @@ -3272,7 +3280,7 @@ static void __exit target_core_exit_configfs(void) kfree(alua_cg->default_groups); alua_cg->default_groups = NULL; - hba_cg = &se_global->target_core_hbagroup; + hba_cg = &target_core_hbagroup; for (i = 0; hba_cg->default_groups[i]; i++) { item = &hba_cg->default_groups[i]->cg_item; hba_cg->default_groups[i] = NULL; @@ -3287,17 +3295,15 @@ static void __exit target_core_exit_configfs(void) configfs_unregister_subsystem(subsys); kfree(subsys->su_group.default_groups); - core_alua_free_lu_gp(se_global->default_lu_gp); - se_global->default_lu_gp = NULL; + core_alua_free_lu_gp(default_lu_gp); + default_lu_gp = NULL; printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" " Infrastructure\n"); core_dev_release_virtual_lun0(); rd_module_exit(); - release_se_global(); - - return; + release_se_kmem_caches(); } MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS"); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e76ffc5b2079..fd923854505c 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1,7 +1,7 @@ /******************************************************************************* * Filename: target_core_device.c (based on iscsi_target_device.c) * - * This file contains the iSCSI Virtual Device and Disk Transport + * This file contains the TCM Virtual Device and Disk Transport * agnostic related functions. * * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 
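The configfs setup and teardown converted above lean on one pattern throughout: a parent config_group owns a kzalloc'd, NULL-terminated default_groups array of child group pointers, and teardown walks that array dropping one reference per child before freeing it. A minimal sketch of the pattern follows; the example_* names and the child item type are illustrative, not code from this series (note the sketch sizes the allocation for an array of pointers).

/*
 * Editor's sketch, not part of the patch: the default_groups pattern
 * used by target_core_init_configfs()/target_core_exit_configfs().
 */
#include <linux/configfs.h>
#include <linux/slab.h>

static struct config_item_type example_child_cit;	/* illustrative */
static struct config_group example_child_group;

static int example_setup_groups(struct config_group *parent)
{
	/* One child pointer plus the NULL terminator configfs expects. */
	parent->default_groups = kzalloc(sizeof(struct config_group *) * 2,
				GFP_KERNEL);
	if (!parent->default_groups)
		return -ENOMEM;

	config_group_init_type_name(&example_child_group, "example",
				&example_child_cit);
	parent->default_groups[0] = &example_child_group;
	parent->default_groups[1] = NULL;
	return 0;
}

static void example_teardown_groups(struct config_group *parent)
{
	struct config_item *item;
	int i;

	/* Clear each slot before dropping the reference, as above. */
	for (i = 0; parent->default_groups[i]; i++) {
		item = &parent->default_groups[i]->cg_item;
		parent->default_groups[i] = NULL;
		config_item_put(item);
	}
	kfree(parent->default_groups);
	parent->default_groups = NULL;
}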
@@ -54,25 +54,30 @@ static void se_dev_start(struct se_device *dev); static void se_dev_stop(struct se_device *dev); +static struct se_hba *lun0_hba; +static struct se_subsystem_dev *lun0_su_dev; +/* not static, needed by tpg.c */ +struct se_device *g_lun0_dev; + int transport_get_lun_for_cmd( struct se_cmd *se_cmd, u32 unpacked_lun) { struct se_dev_entry *deve; struct se_lun *se_lun = NULL; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; unsigned long flags; int read_only = 0; if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_lock_irq(&se_sess->se_node_acl->device_list_lock); deve = se_cmd->se_deve = - &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; + &se_sess->se_node_acl->device_list[unpacked_lun]; if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (se_cmd) { deve->total_cmds++; @@ -95,11 +100,11 @@ int transport_get_lun_for_cmd( se_lun = se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } out: - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); if (!se_lun) { if (read_only) { @@ -107,9 +112,9 @@ out: se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), + se_cmd->se_tfo->get_fabric_name(), unpacked_lun); - return -1; + return -EACCES; } else { /* * Use the se_portal_group->tpg_virt_lun0 to allow for @@ -121,9 +126,9 @@ out: se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), + se_cmd->se_tfo->get_fabric_name(), unpacked_lun); - return -1; + return -ENODEV; } /* * Force WRITE PROTECT for virtual LUN 0 @@ -132,15 +137,15 @@ out: (se_cmd->data_direction != DMA_NONE)) { se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -EACCES; } #if 0 printk("TARGET_CORE[%s]: Using virtual LUN0! 
:-)\n", - CMD_TFO(se_cmd)->get_fabric_name()); + se_cmd->se_tfo->get_fabric_name()); #endif se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; se_cmd->orig_fe_lun = 0; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } } @@ -151,7 +156,7 @@ out: if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } { @@ -171,10 +176,10 @@ out: */ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); - atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1); + atomic_set(&se_cmd->t_task->transport_lun_active, 1); #if 0 printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", - CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun); + se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun); #endif spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); @@ -189,35 +194,35 @@ int transport_get_lun_for_tmr( struct se_device *dev = NULL; struct se_dev_entry *deve; struct se_lun *se_lun = NULL; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_lock_irq(&se_sess->se_node_acl->device_list_lock); deve = se_cmd->se_deve = - &SE_NODE_ACL(se_sess)->device_list[unpacked_lun]; + &se_sess->se_node_acl->device_list[unpacked_lun]; if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; dev = se_lun->lun_se_dev; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; /* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ } - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); if (!se_lun) { printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", - CMD_TFO(se_cmd)->get_fabric_name(), + se_cmd->se_tfo->get_fabric_name(), unpacked_lun); se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } /* * Determine if the struct se_lun is online. 
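Note the shape of the error-path conversion in transport_get_lun_for_cmd() above: every exceptional exit used to return a bare -1, and each failure mode now carries its own errno, -ENODEV for a LUN that does not exist and -EACCES for a write to a write-protected LUN. A hedged sketch of the convention, with hypothetical helpers standing in for the real device-list lookup:

/* Editor's sketch, not part of the patch: distinct errnos per failure. */
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_MAX_LUNS	256	/* stand-in for TRANSPORT_MAX_LUNS_PER_TPG */

static bool example_lun_exists(u32 lun)
{
	return lun < 8;		/* pretend the first eight LUNs are mapped */
}

static bool example_lun_is_read_only(u32 lun)
{
	return lun == 0;	/* pretend LUN 0 is write-protected */
}

static int example_check_lun(u32 unpacked_lun, bool is_write)
{
	if (unpacked_lun >= EXAMPLE_MAX_LUNS || !example_lun_exists(unpacked_lun))
		return -ENODEV;	/* no such LUN for this nexus */
	if (is_write && example_lun_is_read_only(unpacked_lun))
		return -EACCES;	/* LUN exists, but writes are denied */
	return 0;
}

A caller can now propagate the return value directly instead of mapping a blanket -1 back onto sense data.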
@@ -225,7 +230,7 @@ int transport_get_lun_for_tmr( /* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -1; + return -ENODEV; } se_tmr->tmr_dev = dev; @@ -263,14 +268,14 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( if (!(lun)) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } port = lun->lun_sep; if (!(port)) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } if (port->sep_rtpi != rtpi) @@ -308,7 +313,7 @@ int core_free_device_list_for_node( if (!deve->se_lun) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } lun = deve->se_lun; @@ -334,8 +339,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) deve = &se_nacl->device_list[se_cmd->orig_fe_lun]; deve->deve_cmds--; spin_unlock_irq(&se_nacl->device_list_lock); - - return; } void core_update_device_list_access( @@ -355,8 +358,6 @@ void core_update_device_list_access( deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY; } spin_unlock_irq(&nacl->device_list_lock); - - return; } /* core_update_device_list_for_node(): @@ -408,14 +409,14 @@ int core_update_device_list_for_node( " already set for demo mode -> explicit" " LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); - return -1; + return -EINVAL; } if (deve->se_lun != lun) { printk(KERN_ERR "struct se_dev_entry->se_lun does" " not match passed struct se_lun for demo mode" " -> explicit LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); - return -1; + return -EINVAL; } deve->se_lun_acl = lun_acl; trans = 1; @@ -503,8 +504,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg) spin_lock_bh(&tpg->acl_node_lock); } spin_unlock_bh(&tpg->acl_node_lock); - - return; } static struct se_port *core_alloc_port(struct se_device *dev) @@ -514,7 +513,7 @@ static struct se_port *core_alloc_port(struct se_device *dev) port = kzalloc(sizeof(struct se_port), GFP_KERNEL); if (!(port)) { printk(KERN_ERR "Unable to allocate struct se_port\n"); - return NULL; + return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&port->sep_alua_list); INIT_LIST_HEAD(&port->sep_list); @@ -527,7 +526,7 @@ static struct se_port *core_alloc_port(struct se_device *dev) printk(KERN_WARNING "Reached dev->dev_port_count ==" " 0x0000ffff\n"); spin_unlock(&dev->se_port_lock); - return NULL; + return ERR_PTR(-ENOSPC); } again: /* @@ -565,7 +564,7 @@ static void core_export_port( struct se_port *port, struct se_lun *lun) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL; spin_lock(&dev->se_port_lock); @@ -578,7 +577,7 @@ static void core_export_port( list_add_tail(&port->sep_list, &dev->dev_sep_list); spin_unlock(&dev->se_port_lock); - if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) { + if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" @@ -587,11 +586,11 @@ }
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, - T10_ALUA(su_dev)->default_tg_pt_gp); + su_dev->t10_alua.default_tg_pt_gp); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" " Group: alua/default_tg_pt_gp\n", - TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name()); + dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); } dev->dev_port_count++; @@ -618,8 +617,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port) list_del(&port->sep_list); dev->dev_port_count--; kfree(port); - - return; } int core_dev_export( @@ -630,8 +627,8 @@ int core_dev_export( struct se_port *port; port = core_alloc_port(dev); - if (!(port)) - return -1; + if (IS_ERR(port)) + return PTR_ERR(port); lun->lun_se_dev = dev; se_dev_start(dev); @@ -668,12 +665,12 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) { struct se_dev_entry *deve; struct se_lun *se_lun; - struct se_session *se_sess = SE_SESS(se_cmd); + struct se_session *se_sess = se_cmd->se_sess; struct se_task *se_task; - unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf; + unsigned char *buf = se_cmd->t_task->t_task_buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; - list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list) + list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list) break; if (!(se_task)) { @@ -692,9 +689,9 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) goto done; } - spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_lock_irq(&se_sess->se_node_acl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { - deve = &SE_NODE_ACL(se_sess)->device_list[i]; + deve = &se_sess->se_node_acl->device_list[i]; if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)) continue; se_lun = deve->se_lun; @@ -711,7 +708,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) offset += 8; cdb_offset += 8; } - spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); /* * See SPC3 r07, page 159. 
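The core_alloc_port()/core_dev_export() hunks above are a textbook ERR_PTR() conversion: instead of collapsing every failure to NULL, the errno is encoded in the returned pointer and the caller recovers it with IS_ERR()/PTR_ERR(). A minimal sketch of the idiom, with illustrative names:

/*
 * Editor's sketch, not part of the patch: the ERR_PTR()/IS_ERR()/
 * PTR_ERR() idiom adopted by core_alloc_port() and core_dev_export().
 */
#include <linux/err.h>
#include <linux/slab.h>

struct example_port { int id; };

static struct example_port *example_alloc_port(int next_id)
{
	struct example_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);
	if (next_id > 0xffff) {
		kfree(port);
		return ERR_PTR(-ENOSPC);	/* ID space exhausted */
	}
	port->id = next_id;
	return port;
}

static int example_export(int next_id)
{
	struct example_port *port = example_alloc_port(next_id);

	if (IS_ERR(port))
		return PTR_ERR(port);	/* propagate -ENOMEM or -ENOSPC */
	/* ... use port ... */
	kfree(port);
	return 0;
}

The allocator can now report why it failed, not merely that it failed, which is exactly what lets core_dev_export() return PTR_ERR(port) above.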
@@ -755,26 +752,20 @@ void se_release_device_for_hba(struct se_device *dev) core_scsi3_free_all_registrations(dev); se_release_vpd_for_dev(dev); - kfree(dev->dev_status_queue_obj); - kfree(dev->dev_queue_obj); kfree(dev); - - return; } void se_release_vpd_for_dev(struct se_device *dev) { struct t10_vpd *vpd, *vpd_tmp; - spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock); + spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); list_for_each_entry_safe(vpd, vpd_tmp, - &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) { + &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) { list_del(&vpd->vpd_list); kfree(vpd); } - spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock); - - return; + spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock); } /* se_free_virtual_device(): @@ -860,48 +851,48 @@ void se_dev_set_default_attribs( { struct queue_limits *limits = &dev_limits->limits; - DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO; - DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE; - DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ; - DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE; - DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; - DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS; - DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU; - DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS; - DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS; - DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA; - DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS; + dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO; + dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE; + dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ; + dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE; + dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL; + dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS; + dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU; + dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS; + dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; + dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; + dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; /* * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK * iblock_create_virtdevice() from struct queue_limits values * if blk_queue_discard()==1 */ - DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; - DEV_ATTRIB(dev)->max_unmap_block_desc_count = - DA_MAX_UNMAP_BLOCK_DESC_COUNT; - DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; - DEV_ATTRIB(dev)->unmap_granularity_alignment = + dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; + dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = + DA_MAX_UNMAP_BLOCK_DESC_COUNT; + dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT; + dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; /* * block_size is based on subsystem plugin dependent requirements. */ - DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size; - DEV_ATTRIB(dev)->block_size = limits->logical_block_size; + dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size; + dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size; /* * max_sectors is based on subsystem plugin dependent requirements. 
*/ - DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors; - DEV_ATTRIB(dev)->max_sectors = limits->max_sectors; + dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; + dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors; /* * Set optimal_sectors from max_sectors, which can be lowered via * configfs. */ - DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors; + dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors; /* * queue_depth is based on subsystem plugin dependent requirements. */ - DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth; - DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth; + dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth; } int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) @@ -909,9 +900,9 @@ if (task_timeout > DA_TASK_TIMEOUT_MAX) { printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than" " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); - return -1; + return -EINVAL; } else { - DEV_ATTRIB(dev)->task_timeout = task_timeout; + dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", dev, task_timeout); } @@ -923,9 +914,9 @@ int se_dev_set_max_unmap_lba_count( struct se_device *dev, u32 max_unmap_lba_count) { - DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count; + dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", - dev, DEV_ATTRIB(dev)->max_unmap_lba_count); + dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); return 0; } @@ -933,9 +924,10 @@ int se_dev_set_max_unmap_block_desc_count( struct se_device *dev, u32 max_unmap_block_desc_count) { - DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count; + dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = + max_unmap_block_desc_count; printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", - dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count); + dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); return 0; } @@ -943,9 +935,9 @@ int se_dev_set_unmap_granularity( struct se_device *dev, u32 unmap_granularity) { - DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity; + dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", - dev, DEV_ATTRIB(dev)->unmap_granularity); + dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); return 0; } @@ -953,9 +945,9 @@ int se_dev_set_unmap_granularity_alignment( struct se_device *dev, u32 unmap_granularity_alignment) { - DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment; + dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", - dev, DEV_ATTRIB(dev)->unmap_granularity_alignment); + dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); return 0; } @@ -963,19 +955,19 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->dpo_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n"); - return -1; + if (dev->transport->dpo_emulated == NULL) { + printk(KERN_ERR
"dev->transport->dpo_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->dpo_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n"); - return -1; + if (dev->transport->dpo_emulated(dev) == 0) { + printk(KERN_ERR "dev->transport->dpo_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_dpo = flag; + dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" - " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo); + " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); return 0; } @@ -983,19 +975,19 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->fua_write_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n"); - return -1; + if (dev->transport->fua_write_emulated == NULL) { + printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n"); - return -1; + if (dev->transport->fua_write_emulated(dev) == 0) { + printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_fua_write = flag; + dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", - dev, DEV_ATTRIB(dev)->emulate_fua_write); + dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); return 0; } @@ -1003,19 +995,19 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->fua_read_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n"); - return -1; + if (dev->transport->fua_read_emulated == NULL) { + printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n"); - return -1; + if (dev->transport->fua_read_emulated(dev) == 0) { + printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_fua_read = flag; + dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", - dev, DEV_ATTRIB(dev)->emulate_fua_read); + dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); return 0; } @@ -1023,19 +1015,19 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->write_cache_emulated == NULL) { - printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n"); - return -1; + if (dev->transport->write_cache_emulated == NULL) { + printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n"); + return -EINVAL; } - if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) { - printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n"); - return -1; + if (dev->transport->write_cache_emulated(dev) == 0) { + printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n"); + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_write_cache = flag; + 
dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", - dev, DEV_ATTRIB(dev)->emulate_write_cache); + dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); return 0; } @@ -1043,7 +1035,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1) && (flag != 2)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } if (atomic_read(&dev->dev_export_obj.obj_access_count)) { @@ -1051,11 +1043,11 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) " UA_INTRLCK_CTRL while dev_export_obj: %d count" " exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag; + dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", - dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl); + dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); return 0; } @@ -1064,18 +1056,18 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } if (atomic_read(&dev->dev_export_obj.obj_access_count)) { printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->emulate_tas = flag; + dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", - dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled"); + dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); return 0; } @@ -1084,18 +1076,18 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { + if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { printk(KERN_ERR "Generic Block Discard not supported\n"); return -ENOSYS; } - DEV_ATTRIB(dev)->emulate_tpu = flag; + dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", dev, flag); return 0; @@ -1105,18 +1097,18 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). 
*/ - if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) { + if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { printk(KERN_ERR "Generic Block Discard not supported\n"); return -ENOSYS; } - DEV_ATTRIB(dev)->emulate_tpws = flag; + dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", dev, flag); return 0; @@ -1126,11 +1118,11 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { printk(KERN_ERR "Illegal value %d\n", flag); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->enforce_pr_isids = flag; + dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, - (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled"); + (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled"); return 0; } @@ -1145,35 +1137,35 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } if (!(queue_depth)) { printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" "_depth\n", dev); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" " exceeds TCM/SE_Device TCQ: %u\n", dev, queue_depth, - DEV_ATTRIB(dev)->hw_queue_depth); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth); + return -EINVAL; } } else { - if (queue_depth > DEV_ATTRIB(dev)->queue_depth) { - if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) { + if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { + if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { printk(KERN_ERR "dev[%p]: Passed queue_depth:" " %u exceeds TCM/SE_Device MAX" " TCQ: %u\n", dev, queue_depth, - DEV_ATTRIB(dev)->hw_queue_depth); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_queue_depth); + return -EINVAL; } } } - DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth; + dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth; if (queue_depth > orig_queue_depth) atomic_add(queue_depth - orig_queue_depth, &dev->depth_left); else if (queue_depth < orig_queue_depth) @@ -1192,46 +1184,46 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) printk(KERN_ERR "dev[%p]: Unable to change SE Device" " max_sectors while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } if (!(max_sectors)) { printk(KERN_ERR "dev[%p]: Illegal ZERO value for" " max_sectors\n", dev); - return -1; + return -EINVAL; } if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MIN); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " 
greater than TCM/SE_Device max_sectors:" " %u\n", dev, max_sectors, - DEV_ATTRIB(dev)->hw_max_sectors); - return -1; + dev->se_sub_dev->se_dev_attrib.hw_max_sectors); + return -EINVAL; } } else { if (!(force) && (max_sectors > - DEV_ATTRIB(dev)->hw_max_sectors)) { + dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " greater than TCM/SE_Device max_sectors" ": %u, use force=1 to override.\n", dev, - max_sectors, DEV_ATTRIB(dev)->hw_max_sectors); - return -1; + max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); + return -EINVAL; } if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" " greater than DA_STATUS_MAX_SECTORS_MAX:" " %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MAX); - return -1; + return -EINVAL; } } - DEV_ATTRIB(dev)->max_sectors = max_sectors; + dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; printk("dev[%p]: SE Device max_sectors changed to %u\n", dev, max_sectors); return 0; @@ -1245,19 +1237,19 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" " changed for TCM/pSCSI\n", dev); return -EINVAL; } - if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) { + if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" " greater than max_sectors: %u\n", dev, - optimal_sectors, DEV_ATTRIB(dev)->max_sectors); + optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); return -EINVAL; } - DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors; + dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", dev, optimal_sectors); return 0; @@ -1269,7 +1261,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" " while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); - return -1; + return -EINVAL; } if ((block_size != 512) && @@ -1279,17 +1271,17 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" " for SE device, must be 512, 1024, 2048 or 4096\n", dev, block_size); - return -1; + return -EINVAL; } - if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { + if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" " Physical Device, use for Linux/SCSI to change" " block_size for underlying hardware\n", dev); - return -1; + return -EINVAL; } - DEV_ATTRIB(dev)->block_size = block_size; + dev->se_sub_dev->se_dev_attrib.block_size = block_size; printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", dev, block_size); return 0; @@ -1323,14 +1315,14 @@ struct se_lun *core_dev_add_lun( return NULL; printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" - " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun, - TPG_TFO(tpg)->get_fabric_name(), hba->hba_id); + " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, + 
tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); /* * Update LUN maps for dynamically added initiators when * generate_node_acl is enabled. */ - if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) { + if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) { struct se_node_acl *acl; spin_lock_bh(&tpg->acl_node_lock); list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { @@ -1364,9 +1356,9 @@ int core_dev_del_lun( core_tpg_post_dellun(tpg, lun); printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" - " device object\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, - TPG_TFO(tpg)->get_fabric_name()); + " device object\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name()); return 0; } @@ -1379,9 +1371,9 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" "_PER_TPG-1: %u for Target Portal Group: %hu\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1390,8 +1382,8 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { printk(KERN_ERR "%s Logical Unit Number: %u is not free on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1412,9 +1404,9 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" "_TPG-1: %u for Target Portal Group: %hu\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1423,8 +1415,8 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { printk(KERN_ERR "%s Logical Unit Number: %u is not active on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return NULL; } @@ -1444,7 +1436,7 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); *ret = -EOVERFLOW; return NULL; } @@ -1481,8 +1473,8 @@ int core_dev_add_initiator_node_lun_acl( if (!(lun)) { printk(KERN_ERR "%s Logical Unit Number: %u is not active on" " Target Portal Group: %hu, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); return -EINVAL; } @@ -1507,8 +1499,8 @@ int core_dev_add_initiator_node_lun_acl( 
spin_unlock(&lun->lun_acl_lock); printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " - " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, + " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", lacl->initiatorname); /* @@ -1547,8 +1539,8 @@ int core_dev_del_initiator_node_lun_acl( printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" " InitiatorNode: %s Mapped LUN: %u\n", - TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->initiatorname, lacl->mapped_lun); return 0; @@ -1559,9 +1551,9 @@ void core_dev_free_initiator_node_lun_acl( struct se_lun_acl *lacl) { printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" - " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), - TPG_TFO(tpg)->get_fabric_name(), + " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), + tpg->se_tpg_tfo->get_fabric_name(), lacl->initiatorname, lacl->mapped_lun); kfree(lacl); @@ -1580,7 +1572,7 @@ int core_dev_setup_virtual_lun0(void) if (IS_ERR(hba)) return PTR_ERR(hba); - se_global->g_lun0_hba = hba; + lun0_hba = hba; t = hba->transport; se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); @@ -1590,17 +1582,17 @@ int core_dev_setup_virtual_lun0(void) ret = -ENOMEM; goto out; } - INIT_LIST_HEAD(&se_dev->g_se_dev_list); + INIT_LIST_HEAD(&se_dev->se_dev_node); INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list); spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock); - INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list); - INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list); - spin_lock_init(&se_dev->t10_reservation.registration_lock); - spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock); + INIT_LIST_HEAD(&se_dev->t10_pr.registration_list); + INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list); + spin_lock_init(&se_dev->t10_pr.registration_lock); + spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock); INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list); spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock); spin_lock_init(&se_dev->se_dev_lock); - se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; + se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN; se_dev->t10_wwn.t10_sub_dev = se_dev; se_dev->t10_alua.t10_sub_dev = se_dev; se_dev->se_dev_attrib.da_sub_dev = se_dev; @@ -1613,27 +1605,27 @@ int core_dev_setup_virtual_lun0(void) ret = -ENOMEM; goto out; } - se_global->g_lun0_su_dev = se_dev; + lun0_su_dev = se_dev; memset(buf, 0, 16); sprintf(buf, "rd_pages=8"); t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf)); dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr); - if (!(dev) || IS_ERR(dev)) { - ret = -ENOMEM; + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); goto out; } se_dev->se_dev_ptr = dev; - se_global->g_lun0_dev = dev; + g_lun0_dev = dev; return 0; out: - se_global->g_lun0_su_dev = NULL; + lun0_su_dev = NULL; kfree(se_dev); - if (se_global->g_lun0_hba) { - core_delete_hba(se_global->g_lun0_hba); - se_global->g_lun0_hba = NULL; + if (lun0_hba) { + core_delete_hba(lun0_hba); + lun0_hba = NULL; } return ret; } @@ -1641,14 +1633,14 @@ out: void core_dev_release_virtual_lun0(void) { - struct se_hba *hba = se_global->g_lun0_hba; - struct se_subsystem_dev *su_dev = 
se_global->g_lun0_su_dev; + struct se_hba *hba = lun0_hba; + struct se_subsystem_dev *su_dev = lun0_su_dev; if (!(hba)) return; - if (se_global->g_lun0_dev) - se_free_virtual_device(se_global->g_lun0_dev, hba); + if (g_lun0_dev) + se_free_virtual_device(g_lun0_dev, hba); kfree(su_dev); core_delete_hba(hba); diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 07ab5a3bb8e8..0b1659d0fefc 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link( lun_access = deve->lun_flags; else lun_access = - (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect( + (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : TRANSPORT_LUNFLAGS_READ_WRITE; spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); @@ -204,7 +204,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect( printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" " Mapped LUN: %u Write Protect bit to %s\n", - TPG_TFO(se_tpg)->get_fabric_name(), + se_tpg->se_tpg_tfo->get_fabric_name(), lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); return count; @@ -379,7 +379,7 @@ static struct config_group *target_fabric_make_mappedlun( lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; lacl_cg->default_groups[1] = NULL; - ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; + ml_stat_grp = &lacl->ml_stat_grps.stat_group; ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, GFP_KERNEL); if (!ml_stat_grp->default_groups) { @@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun( struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; int i; - ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; + ml_stat_grp = &lacl->ml_stat_grps.stat_group; for (i = 0; ml_stat_grp->default_groups[i]; i++) { df_item = &ml_stat_grp->default_groups[i]->cg_item; ml_stat_grp->default_groups[i] = NULL; @@ -914,7 +914,7 @@ static struct config_group *target_fabric_make_lun( lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; lun_cg->default_groups[1] = NULL; - port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; + port_stat_grp = &lun->port_stat_grps.stat_group; port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, GFP_KERNEL); if (!port_stat_grp->default_groups) { @@ -941,7 +941,7 @@ static void target_fabric_drop_lun( struct config_group *lun_cg, *port_stat_grp; int i; - port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; + port_stat_grp = &lun->port_stat_grps.stat_group; for (i = 0; port_stat_grp->default_groups[i]; i++) { df_item = &port_stat_grp->default_groups[i]->cg_item; port_stat_grp->default_groups[i] = NULL; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 150c4305f385..0c44bc051484 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -67,22 +67,19 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); if (!(fd_host)) { printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); - return -1; + return -ENOMEM; } fd_host->fd_host_id = host_id; - atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH); - atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH); - hba->hba_ptr = (void *) fd_host; + hba->hba_ptr = fd_host; printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" " Target Core Stack %s\n", hba->hba_id, FD_VERSION, 
TARGET_CORE_MOD_VERSION); printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" - " Target Core with TCQ Depth: %d MaxSectors: %u\n", - hba->hba_id, fd_host->fd_host_id, - atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS); + " MaxSectors: %u\n", + hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); return 0; } @@ -282,7 +279,7 @@ fd_alloc_task(struct se_cmd *cmd) return NULL; } - fd_req->fd_dev = SE_DEV(cmd)->dev_ptr; + fd_req->fd_dev = cmd->se_lun->lun_se_dev->dev_ptr; return &fd_req->fd_task; } @@ -294,13 +291,14 @@ static int fd_do_readv(struct se_task *task) struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; - loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); + loff_t pos = (task->task_lba * + task->se_dev->se_sub_dev->se_dev_attrib.block_size); int ret = 0, i; iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); if (!(iov)) { printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); - return -1; + return -ENOMEM; } for (i = 0; i < task->task_sg_num; i++) { @@ -324,13 +322,13 @@ static int fd_do_readv(struct se_task *task) printk(KERN_ERR "vfs_readv() returned %d," " expecting %d for S_ISBLK\n", ret, (int)task->task_size); - return -1; + return (ret < 0 ? ret : -EINVAL); } } else { if (ret < 0) { printk(KERN_ERR "vfs_readv() returned %d for non" " S_ISBLK\n", ret); - return -1; + return ret; } } @@ -344,13 +342,14 @@ static int fd_do_writev(struct se_task *task) struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; - loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size); + loff_t pos = (task->task_lba * + task->se_dev->se_sub_dev->se_dev_attrib.block_size); int ret, i = 0; iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); if (!(iov)) { printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); - return -1; + return -ENOMEM; } for (i = 0; i < task->task_sg_num; i++) { @@ -367,7 +366,7 @@ static int fd_do_writev(struct se_task *task) if (ret < 0 || ret != task->task_size) { printk(KERN_ERR "vfs_writev() returned %d\n", ret); - return -1; + return (ret < 0 ? 
ret : -EINVAL); } return 1; @@ -375,7 +374,7 @@ static int fd_do_writev(struct se_task *task) static void fd_emulate_sync_cache(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; int immed = (cmd->t_task->t_task_cdb[1] & 0x2); @@ -396,7 +395,7 @@ static void fd_emulate_sync_cache(struct se_task *task) start = 0; end = LLONG_MAX; } else { - start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size; + start = cmd->t_task->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; if (cmd->data_length) end = start + cmd->data_length; else @@ -446,7 +445,7 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) { struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; - loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size; + loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size; loff_t end = start + task->task_size; int ret; @@ -474,9 +473,9 @@ static int fd_do_task(struct se_task *task) ret = fd_do_writev(task); if (ret > 0 && - DEV_ATTRIB(dev)->emulate_write_cache > 0 && - DEV_ATTRIB(dev)->emulate_fua_write > 0 && - T_TASK(cmd)->t_tasks_fua) { + dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && + dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && + cmd->t_task->t_tasks_fua) { /* * We might need to be a bit smarter here * and return some sense data to let the initiator @@ -599,7 +598,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { printk(KERN_ERR "Missing fd_dev_name=\n"); - return -1; + return -EINVAL; } return 0; @@ -654,7 +653,7 @@ static sector_t fd_get_blocks(struct se_device *dev) { struct fd_dev *fd_dev = dev->dev_ptr; unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size, - DEV_ATTRIB(dev)->block_size); + dev->se_sub_dev->se_dev_attrib.block_size); return blocks_long; } diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index ef4de2b4bd46..6386d3f60631 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -4,8 +4,6 @@ #define FD_VERSION "4.0" #define FD_MAX_DEV_NAME 256 -/* Maximum queuedepth for the FILEIO HBA */ -#define FD_HBA_QUEUE_DEPTH 256 #define FD_DEVICE_QUEUE_DEPTH 32 #define FD_MAX_DEVICE_QUEUE_DEPTH 128 #define FD_BLOCKSIZE 512 diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 0b8f8da89019..bd9da25bc945 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -1,7 +1,7 @@ /******************************************************************************* * Filename: target_core_hba.c * - * This file copntains the iSCSI HBA Transport related functions. + * This file contains the TCM HBA Transport related functions. * * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. * Copyright (c) 2005, 2006, 2007 SBE, Inc. 
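The target_core_hba.c hunk just below shows the series' other recurring move: state that only one file ever touches (the HBA list, its lock, and the ID counter) leaves struct se_global and becomes file-scoped statics. A sketch of the shape, mirroring the hba_lock/hba_list/hba_id_counter names from that hunk with illustrative example_* wrappers:

/*
 * Editor's sketch, not part of the patch: file-scoped static state
 * replacing fields of the old shared struct se_global.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static u32 example_id_counter;
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

struct example_hba {
	u32 hba_id;
	struct list_head hba_node;
};

static void example_register(struct example_hba *hba)
{
	spin_lock(&example_lock);
	hba->hba_id = example_id_counter++;
	list_add_tail(&hba->hba_node, &example_list);
	spin_unlock(&example_lock);
}

Keeping the lock next to the data it protects, in the one translation unit that uses it, is what makes the init_se_global()/release_se_global() pair removable.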
@@ -45,6 +45,11 @@ static LIST_HEAD(subsystem_list); static DEFINE_MUTEX(subsystem_mutex); +static u32 hba_id_counter; + +static DEFINE_SPINLOCK(hba_lock); +static LIST_HEAD(hba_list); + int transport_subsystem_register(struct se_subsystem_api *sub_api) { struct se_subsystem_api *s; @@ -110,15 +115,11 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) INIT_LIST_HEAD(&hba->hba_dev_list); spin_lock_init(&hba->device_lock); - spin_lock_init(&hba->hba_queue_lock); mutex_init(&hba->hba_access_mutex); hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX); hba->hba_flags |= hba_flags; - atomic_set(&hba->max_queue_depth, 0); - atomic_set(&hba->left_queue_depth, 0); - hba->transport = core_get_backend(plugin_name); if (!hba->transport) { ret = -EINVAL; @@ -129,10 +130,10 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) if (ret < 0) goto out_module_put; - spin_lock(&se_global->hba_lock); - hba->hba_id = se_global->g_hba_id_counter++; - list_add_tail(&hba->hba_list, &se_global->g_hba_list); - spin_unlock(&se_global->hba_lock); + spin_lock(&hba_lock); + hba->hba_id = hba_id_counter++; + list_add_tail(&hba->hba_node, &hba_list); + spin_unlock(&hba_lock); printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" " Core\n", hba->hba_id); @@ -156,9 +157,9 @@ core_delete_hba(struct se_hba *hba) hba->transport->detach_hba(hba); - spin_lock(&se_global->hba_lock); - list_del(&hba->hba_node); - spin_unlock(&se_global->hba_lock); + spin_lock(&hba_lock); + list_del(&hba->hba_node); + spin_unlock(&hba_lock); printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" " Core\n", hba->hba_id); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 86639004af9e..fb159876fffc 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -74,17 +74,14 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id) ib_host->iblock_host_id = host_id; - atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); - atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH); hba->hba_ptr = (void *) ib_host; printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic" - " Target Core TCQ Depth: %d\n", hba->hba_id, - ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth)); + printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic Target Core\n", + hba->hba_id, ib_host->iblock_host_id); return 0; } @@ -188,15 +185,15 @@ static struct se_device *iblock_create_virtdevice( * in ATA and we need to set TPE=1 */ if (blk_queue_discard(q)) { - DEV_ATTRIB(dev)->max_unmap_lba_count = + dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = q->limits.max_discard_sectors; /* * Currently hardcoded to 1 in Linux/SCSI code..
*/ - DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1; - DEV_ATTRIB(dev)->unmap_granularity = + dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; + dev->se_sub_dev->se_dev_attrib.unmap_granularity = q->limits.discard_granularity; - DEV_ATTRIB(dev)->unmap_granularity_alignment = + dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = q->limits.discard_alignment; printk(KERN_INFO "IBLOCK: BLOCK Discard support available," @@ -243,7 +240,7 @@ iblock_alloc_task(struct se_cmd *cmd) return NULL; } - ib_req->ib_dev = SE_DEV(cmd)->dev_ptr; + ib_req->ib_dev = cmd->se_lun->lun_se_dev->dev_ptr; atomic_set(&ib_req->ib_bio_cnt, 0); return &ib_req->ib_task; } @@ -257,12 +254,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( bdev_logical_block_size(bd)) - 1); u32 block_size = bdev_logical_block_size(bd); - if (block_size == DEV_ATTRIB(dev)->block_size) + if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) return blocks_long; switch (block_size) { case 4096: - switch (DEV_ATTRIB(dev)->block_size) { + switch (dev->se_sub_dev->se_dev_attrib.block_size) { case 2048: blocks_long <<= 1; break; @@ -276,7 +273,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( } break; case 2048: - switch (DEV_ATTRIB(dev)->block_size) { + switch (dev->se_sub_dev->se_dev_attrib.block_size) { case 4096: blocks_long >>= 1; break; @@ -291,7 +288,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( } break; case 1024: - switch (DEV_ATTRIB(dev)->block_size) { + switch (dev->se_sub_dev->se_dev_attrib.block_size) { case 4096: blocks_long >>= 2; break; @@ -306,7 +303,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( } break; case 512: - switch (DEV_ATTRIB(dev)->block_size) { + switch (dev->se_sub_dev->se_dev_attrib.block_size) { case 4096: blocks_long >>= 3; break; @@ -332,9 +329,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size( */ static void iblock_emulate_sync_cache(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; - int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2); + int immed = (cmd->t_task->t_task_cdb[1] & 0x2); sector_t error_sector; int ret; @@ -401,9 +398,9 @@ static int iblock_do_task(struct se_task *task) * Force data to disk if we pretend to not have a volatile * write cache, or the initiator set the Force Unit Access bit. 
*/ - if (DEV_ATTRIB(dev)->emulate_write_cache == 0 || - (DEV_ATTRIB(dev)->emulate_fua_write > 0 && - T_TASK(task->task_se_cmd)->t_tasks_fua)) + if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || + (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && + task->task_se_cmd->t_task->t_tasks_fua)) rw = WRITE_FUA; else rw = WRITE; @@ -527,7 +524,7 @@ static ssize_t iblock_check_configfs_dev_params( if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); - return -1; + return -EINVAL; } return 0; @@ -611,7 +608,7 @@ static struct bio *iblock_get_bio( static int iblock_map_task_SG(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; struct iblock_dev *ib_dev = task->se_dev->dev_ptr; struct iblock_req *ib_req = IBLOCK_REQ(task); struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; @@ -623,17 +620,17 @@ static int iblock_map_task_SG(struct se_task *task) * Do starting conversion up from non 512-byte blocksize with * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. */ - if (DEV_ATTRIB(dev)->block_size == 4096) + if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) block_lba = (task->task_lba << 3); - else if (DEV_ATTRIB(dev)->block_size == 2048) + else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) block_lba = (task->task_lba << 2); - else if (DEV_ATTRIB(dev)->block_size == 1024) + else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) block_lba = (task->task_lba << 1); - else if (DEV_ATTRIB(dev)->block_size == 512) + else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) block_lba = task->task_lba; else { printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" - " %u\n", DEV_ATTRIB(dev)->block_size); + " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); return PYX_TRANSPORT_LU_COMM_FAILURE; } diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 64c1f4d69f76..6b6d17bb1fd6 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -3,7 +3,6 @@ #define IBLOCK_VERSION "4.0" -#define IBLOCK_HBA_QUEUE_DEPTH 512 #define IBLOCK_DEVICE_QUEUE_DEPTH 32 #define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128 #define IBLOCK_MAX_CDBS 16 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index b662db3a320b..27a7525971b9 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -105,13 +105,13 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) } if (dev->dev_reserved_node_acl != sess->se_node_acl) { spin_unlock(&dev->dev_reservation_lock); - return -1; + return -EINVAL; } if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) { spin_unlock(&dev->dev_reservation_lock); return 0; } - ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1; + ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 
0 : -EINVAL; spin_unlock(&dev->dev_reservation_lock); return ret; @@ -143,8 +143,8 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd) dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; } printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" - " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(), - SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, + " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), + cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; - if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) && - (T_TASK(cmd)->t_task_cdb[1] & 0x02)) { + if ((cmd->t_task->t_task_cdb[1] & 0x01) && + (cmd->t_task->t_task_cdb[1] & 0x02)) { printk(KERN_ERR "LongIO and Obselete Bits set, returning" " ILLEGAL_REQUEST\n"); return PYX_TRANSPORT_ILLEGAL_REQUEST; @@ -174,12 +174,12 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) if (dev->dev_reserved_node_acl && (dev->dev_reserved_node_acl != sess->se_node_acl)) { printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); printk(KERN_ERR "Original reserver LUN: %u %s\n", - SE_LUN(cmd)->unpacked_lun, + cmd->se_lun->unpacked_lun, dev->dev_reserved_node_acl->initiatorname); printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" - " from %s \n", SE_LUN(cmd)->unpacked_lun, + " from %s \n", cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -193,8 +193,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; } printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" - " for %s\n", TPG_TFO(tpg)->get_fabric_name(), - SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun, + " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), + cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -215,9 +215,9 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) struct se_session *se_sess = cmd->se_sess; struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation; - unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; - int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS); + struct t10_reservation *pr_tmpl = &su_dev->t10_pr; + unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); int conflict = 0; if (!(se_sess)) @@ -307,7 +307,7 @@ static int core_scsi3_pr_seq_non_holder( u32 pr_reg_type) { struct se_dev_entry *se_deve; - struct se_session *se_sess = SE_SESS(cmd); + struct se_session *se_sess = cmd->se_sess; int other_cdb = 0, ignore_reg; int registered_nexus = 0, ret = 1; /* Conflict by default */ int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */ @@ -362,7 +362,7 @@ static int core_scsi3_pr_seq_non_holder( registered_nexus = 1; break; default: - return -1; + return -EINVAL; } /* * Referenced from spc4r17 table 45 for *NON* PR holder access @@ -414,7 +414,7 @@ static int core_scsi3_pr_seq_non_holder( default: printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" " action: 0x%02x\n", cdb[1] & 0x1f); - 
return -1; + return -EINVAL; } break; case RELEASE: @@ -461,7 +461,7 @@ static int core_scsi3_pr_seq_non_holder( default: printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", (cdb[1] & 0x1f)); - return -1; + return -EINVAL; } break; case ACCESS_CONTROL_IN: @@ -549,7 +549,7 @@ static int core_scsi3_pr_seq_non_holder( static u32 core_scsi3_pr_generation(struct se_device *dev) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; u32 prg; /* * PRGeneration field shall contain the value of a 32-bit wrapping @@ -561,7 +561,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev) * See spc4r17 section 6.3.12 READ_KEYS service action */ spin_lock(&dev->dev_reservation_lock); - prg = T10_RES(su_dev)->pr_generation++; + prg = su_dev->t10_pr.pr_generation++; spin_unlock(&dev->dev_reservation_lock); return prg; @@ -592,14 +592,14 @@ static int core_scsi3_pr_reservation_check( cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key; if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) { spin_unlock(&dev->dev_reservation_lock); - return -1; + return -EINVAL; } if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { spin_unlock(&dev->dev_reservation_lock); return 0; } ret = (dev->dev_pr_res_holder->pr_reg_bin_isid == - sess->sess_bin_isid) ? 0 : -1; + sess->sess_bin_isid) ? 0 : -EINVAL; /* * Use bit in *pr_reg_type to notify ISID mismatch in * core_scsi3_pr_seq_non_holder(). @@ -620,7 +620,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( int all_tg_pt, int aptpl) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct t10_pr_registration *pr_reg; pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); @@ -629,7 +629,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( return NULL; } - pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len, + pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, GFP_ATOMIC); if (!(pr_reg->pr_aptpl_buf)) { printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); @@ -803,7 +803,7 @@ out: } int core_scsi3_alloc_aptpl_registration( - struct t10_reservation_template *pr_tmpl, + struct t10_reservation *pr_tmpl, u64 sa_res_key, unsigned char *i_port, unsigned char *isid, @@ -819,13 +819,13 @@ int core_scsi3_alloc_aptpl_registration( if (!(i_port) || !(t_port) || !(sa_res_key)) { printk(KERN_ERR "Illegal parameters for APTPL registration\n"); - return -1; + return -EINVAL; } pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); if (!(pr_reg)) { printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); - return -1; + return -ENOMEM; } pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); @@ -893,11 +893,11 @@ static void core_scsi3_aptpl_reserve( printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" " new reservation holder TYPE: %s ALL_TG_PT: %d\n", - TPG_TFO(tpg)->get_fabric_name(), + tpg->se_tpg_tfo->get_fabric_name(), core_scsi3_pr_dump_type(pr_reg->pr_res_type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", - TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname, + tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, (prf_isid) ? 
&i_buf[0] : ""); } @@ -913,7 +913,7 @@ static int __core_scsi3_check_aptpl_registration( struct se_dev_entry *deve) { struct t10_pr_registration *pr_reg, *pr_reg_tmp; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; unsigned char i_port[PR_APTPL_MAX_IPORT_LEN]; unsigned char t_port[PR_APTPL_MAX_TPORT_LEN]; u16 tpgt; @@ -925,8 +925,8 @@ static int __core_scsi3_check_aptpl_registration( */ snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname); snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s", - TPG_TFO(tpg)->tpg_get_wwn(tpg)); - tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg); + tpg->se_tpg_tfo->tpg_get_wwn(tpg)); + tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg); /* * Look for the matching registrations+reservation from those * created from APTPL metadata. Note that multiple registrations @@ -980,11 +980,11 @@ int core_scsi3_check_aptpl_registration( struct se_lun *lun, struct se_lun_acl *lun_acl) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct se_node_acl *nacl = lun_acl->se_lun_nacl; struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun]; - if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS) + if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; return __core_scsi3_check_aptpl_registration(dev, tpg, lun, @@ -1017,7 +1017,7 @@ static void __core_scsi3_dump_registration( printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" " Port(s)\n", tfo->get_fabric_name(), (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", - TRANSPORT(dev)->name); + dev->transport->name); printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), pr_reg->pr_res_key, pr_reg->pr_res_generation, @@ -1035,10 +1035,10 @@ static void __core_scsi3_add_registration( int register_type, int register_move) { - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo; struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; /* * Increment PRgeneration counter for struct se_device upon a successful @@ -1050,7 +1050,7 @@ static void __core_scsi3_add_registration( * for the REGISTER. */ pr_reg->pr_res_generation = (register_move) ? - T10_RES(su_dev)->pr_generation++ : + su_dev->t10_pr.pr_generation++ : core_scsi3_pr_generation(dev); spin_lock(&pr_tmpl->registration_lock); @@ -1107,7 +1107,7 @@ static int core_scsi3_alloc_registration( pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, sa_res_key, all_tg_pt, aptpl); if (!(pr_reg)) - return -1; + return -EPERM; __core_scsi3_add_registration(dev, nacl, pr_reg, register_type, register_move); @@ -1119,7 +1119,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( struct se_node_acl *nacl, unsigned char *isid) { - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp; struct se_portal_group *tpg; @@ -1143,8 +1143,8 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( * SCSI Intiatior TransportID w/ ISIDs is enforced * for fabric modules (iSCSI) requiring them. 
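/*
 * (Sketch with a hypothetical helper; the pr_iport/pr_tport/pr_reg_tpgt
 * field names are assumed from the t10_pr_registration definitions of
 * this era, not shown in the hunk.)  The APTPL re-attach logic in
 * __core_scsi3_check_aptpl_registration() above only revives a saved
 * registration when all three identity keys match the live nexus:
 */
static bool aptpl_identity_matches(const struct t10_pr_registration *pr_reg,
				   const char *i_port, const char *t_port,
				   u16 tpgt)
{
	return !strcmp(pr_reg->pr_iport, i_port) &&
	       !strcmp(pr_reg->pr_tport, t_port) &&
	       pr_reg->pr_reg_tpgt == tpgt;
}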
*/ - if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { - if (DEV_ATTRIB(dev)->enforce_pr_isids) + if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { + if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) continue; } atomic_inc(&pr_reg->pr_res_holders); @@ -1180,9 +1180,9 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg( struct se_portal_group *tpg = nacl->se_tpg; unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL; - if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) { + if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { memset(&buf[0], 0, PR_REG_ISID_LEN); - TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0], + tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0], PR_REG_ISID_LEN); isid_ptr = &buf[0]; } @@ -1240,7 +1240,7 @@ static int core_scsi3_check_implict_release( " UNREGISTER while existing reservation with matching" " key 0x%016Lx is present from another SCSI Initiator" " Port\n", pr_reg->pr_res_key); - ret = -1; + ret = -EPERM; } spin_unlock(&dev->dev_reservation_lock); @@ -1248,7 +1248,7 @@ static int core_scsi3_check_implict_release( } /* - * Called with struct t10_reservation_template->registration_lock held. + * Called with struct t10_reservation->registration_lock held. */ static void __core_scsi3_free_registration( struct se_device *dev, @@ -1258,7 +1258,7 @@ static void __core_scsi3_free_registration( { struct target_core_fabric_ops *tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; char i_buf[PR_REG_ISID_ID_LEN]; int prf_isid; @@ -1296,7 +1296,7 @@ static void __core_scsi3_free_registration( printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" " Port(s)\n", tfo->get_fabric_name(), (pr_reg->pr_reg_all_tg_pt) ? 
"ALL" : "SINGLE", - TRANSPORT(dev)->name); + dev->transport->name); printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, pr_reg->pr_res_generation); @@ -1319,7 +1319,7 @@ void core_scsi3_free_pr_reg_from_nacl( struct se_device *dev, struct se_node_acl *nacl) { - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; /* * If the passed se_node_acl matches the reservation holder, @@ -1349,7 +1349,7 @@ void core_scsi3_free_pr_reg_from_nacl( void core_scsi3_free_all_registrations( struct se_device *dev) { - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder; spin_lock(&dev->dev_reservation_lock); @@ -1381,13 +1381,13 @@ void core_scsi3_free_all_registrations( static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg) { - return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, &tpg->tpg_group.cg_item); } static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg) { - configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &tpg->tpg_group.cg_item); atomic_dec(&tpg->tpg_pr_ref_count); @@ -1401,7 +1401,7 @@ static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) if (nacl->dynamic_node_acl) return 0; - return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, &nacl->acl_group.cg_item); } @@ -1415,7 +1415,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl) return; } - configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &nacl->acl_group.cg_item); atomic_dec(&nacl->acl_pr_ref_count); @@ -1436,7 +1436,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; - return configfs_depend_item(TPG_TFO(tpg)->tf_subsys, + return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys, &lun_acl->se_lun_group.cg_item); } @@ -1456,7 +1456,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; - configfs_undepend_item(TPG_TFO(tpg)->tf_subsys, + configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, &lun_acl->se_lun_group.cg_item); atomic_dec(&se_deve->pr_ref_count); @@ -1471,10 +1471,10 @@ static int core_scsi3_decode_spec_i_port( int all_tg_pt, int aptpl) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; struct se_port *tmp_port; struct se_portal_group *dest_tpg = NULL, *tmp_tpg; - struct se_session *se_sess = SE_SESS(cmd); + struct se_session *se_sess = cmd->se_sess; struct se_node_acl *dest_node_acl = NULL; struct se_dev_entry *dest_se_deve = NULL, *local_se_deve; struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e; @@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port( struct list_head tid_dest_list; struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; struct target_core_fabric_ops *tmp_tf_ops; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; char *iport_ptr = 
NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tpdl, tid_len = 0; @@ -1509,7 +1509,7 @@ static int core_scsi3_decode_spec_i_port( tidh_new->dest_node_acl = se_sess->se_node_acl; tidh_new->dest_se_deve = local_se_deve; - local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), + local_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, local_se_deve, l_isid, sa_res_key, all_tg_pt, aptpl); if (!(local_pr_reg)) { @@ -1557,7 +1557,7 @@ static int core_scsi3_decode_spec_i_port( tmp_tpg = tmp_port->sep_tpg; if (!(tmp_tpg)) continue; - tmp_tf_ops = TPG_TFO(tmp_tpg); + tmp_tf_ops = tmp_tpg->se_tpg_tfo; if (!(tmp_tf_ops)) continue; if (!(tmp_tf_ops->get_fabric_proto_ident) || @@ -1625,7 +1625,7 @@ static int core_scsi3_decode_spec_i_port( dest_tpg = tmp_tpg; printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" " %s Port RTPI: %hu\n", - TPG_TFO(dest_tpg)->get_fabric_name(), + dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, dest_rtpi); spin_lock(&dev->se_port_lock); @@ -1642,7 +1642,7 @@ static int core_scsi3_decode_spec_i_port( #if 0 printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" " tid_len: %d for %s + %s\n", - TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length, + dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, tpdl, tid_len, i_str, iport_ptr); #endif if (tid_len > tpdl) { @@ -1663,7 +1663,7 @@ static int core_scsi3_decode_spec_i_port( if (!(dest_se_deve)) { printk(KERN_ERR "Unable to locate %s dest_se_deve" " from destination RTPI: %hu\n", - TPG_TFO(dest_tpg)->get_fabric_name(), + dest_tpg->se_tpg_tfo->get_fabric_name(), dest_rtpi); core_scsi3_nodeacl_undepend_item(dest_node_acl); @@ -1686,7 +1686,7 @@ static int core_scsi3_decode_spec_i_port( #if 0 printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" " dest_se_deve mapped_lun: %u\n", - TPG_TFO(dest_tpg)->get_fabric_name(), + dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); #endif /* @@ -1741,7 +1741,7 @@ static int core_scsi3_decode_spec_i_port( * and then call __core_scsi3_add_registration() in the * 2nd loop which will never fail. */ - dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd), + dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, all_tg_pt, aptpl); if (!(dest_pr_reg)) { @@ -1787,12 +1787,12 @@ static int core_scsi3_decode_spec_i_port( prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); - __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl, + __core_scsi3_add_registration(cmd->se_lun->lun_se_dev, dest_node_acl, dest_pr_reg, 0, 0); printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" " registered Transport ID for Node: %s%s Mapped LUN:" - " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(), + " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, (prf_isid) ? &i_buf[0] : "", dest_se_deve->mapped_lun); @@ -1855,7 +1855,7 @@ static int __core_scsi3_update_aptpl_buf( { struct se_lun *lun; struct se_portal_group *tpg; - struct se_subsystem_dev *su_dev = SU_DEV(dev); + struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct t10_pr_registration *pr_reg; unsigned char tmp[512], isid_buf[32]; ssize_t len = 0; @@ -1873,8 +1873,8 @@ static int __core_scsi3_update_aptpl_buf( /* * Walk the registration list.. 
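/*
 * (Sketch with a hypothetical helper.)  __core_scsi3_update_aptpl_buf()
 * formats each record into a 512-byte scratch buffer first, and the
 * length checks below copy it in only if it still fits, so overflow
 * becomes -EMSGSIZE instead of silent truncation:
 */
static int aptpl_append(char *buf, u32 *len, u32 buf_len, const char *tmp)
{
	if (*len + strlen(tmp) >= buf_len)
		return -EMSGSIZE;
	*len += sprintf(buf + *len, "%s", tmp);
	return 0;
}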
*/ - spin_lock(&T10_RES(su_dev)->registration_lock); - list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + spin_lock(&su_dev->t10_pr.registration_lock); + list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, pr_reg_list) { tmp[0] = '\0'; @@ -1900,7 +1900,7 @@ static int __core_scsi3_update_aptpl_buf( "res_holder=1\nres_type=%02x\n" "res_scope=%02x\nres_all_tg_pt=%d\n" "mapped_lun=%u\n", reg_count, - TPG_TFO(tpg)->get_fabric_name(), + tpg->se_tpg_tfo->get_fabric_name(), pr_reg->pr_reg_nacl->initiatorname, isid_buf, pr_reg->pr_res_key, pr_reg->pr_res_type, pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, @@ -1910,7 +1910,7 @@ static int __core_scsi3_update_aptpl_buf( "initiator_fabric=%s\ninitiator_node=%s\n%s" "sa_res_key=%llu\nres_holder=0\n" "res_all_tg_pt=%d\nmapped_lun=%u\n", - reg_count, TPG_TFO(tpg)->get_fabric_name(), + reg_count, tpg->se_tpg_tfo->get_fabric_name(), pr_reg->pr_reg_nacl->initiatorname, isid_buf, pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, pr_reg->pr_res_mapped_lun); @@ -1919,8 +1919,8 @@ static int __core_scsi3_update_aptpl_buf( if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { printk(KERN_ERR "Unable to update renaming" " APTPL metadata\n"); - spin_unlock(&T10_RES(su_dev)->registration_lock); - return -1; + spin_unlock(&su_dev->t10_pr.registration_lock); + return -EMSGSIZE; } len += sprintf(buf+len, "%s", tmp); @@ -1929,21 +1929,21 @@ static int __core_scsi3_update_aptpl_buf( */ snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n" "tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:" - " %d\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_wwn(tpg), - TPG_TFO(tpg)->tpg_get_tag(tpg), + " %d\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { printk(KERN_ERR "Unable to update renaming" " APTPL metadata\n"); - spin_unlock(&T10_RES(su_dev)->registration_lock); - return -1; + spin_unlock(&su_dev->t10_pr.registration_lock); + return -EMSGSIZE; } len += sprintf(buf+len, "%s", tmp); reg_count++; } - spin_unlock(&T10_RES(su_dev)->registration_lock); + spin_unlock(&su_dev->t10_pr.registration_lock); if (!(reg_count)) len += sprintf(buf+len, "No Registrations or Reservations"); @@ -1975,7 +1975,7 @@ static int __core_scsi3_write_aptpl_to_file( unsigned char *buf, u32 pr_aptpl_buf_len) { - struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn; + struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn; struct file *file; struct iovec iov[1]; mm_segment_t old_fs; @@ -1989,7 +1989,7 @@ static int __core_scsi3_write_aptpl_to_file( if (strlen(&wwn->unit_serial[0]) >= 512) { printk(KERN_ERR "WWN value for struct se_device does not fit" " into path buffer\n"); - return -1; + return -EMSGSIZE; } snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); @@ -1997,7 +1997,7 @@ static int __core_scsi3_write_aptpl_to_file( if (IS_ERR(file) || !file || !file->f_dentry) { printk(KERN_ERR "filp_open(%s) for APTPL metadata" " failed\n", path); - return -1; + return (PTR_ERR(file) < 0 ? 
PTR_ERR(file) : -ENOENT); } iov[0].iov_base = &buf[0]; @@ -2014,7 +2014,7 @@ static int __core_scsi3_write_aptpl_to_file( if (ret < 0) { printk("Error writing APTPL metadata file: %s\n", path); filp_close(file, NULL); - return -1; + return -EIO; } filp_close(file, NULL); @@ -2049,14 +2049,14 @@ static int core_scsi3_update_and_write_aptpl( ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len, clear_aptpl_metadata); if (ret != 0) - return -1; + return ret; /* * __core_scsi3_write_aptpl_to_file() will call strlen() * on the passed buf to determine pr_aptpl_buf_len. */ ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0); if (ret != 0) - return -1; + return ret; return ret; } @@ -2070,13 +2070,13 @@ static int core_scsi3_emulate_pro_register( int spec_i_pt, int ignore_key) { - struct se_session *se_sess = SE_SESS(cmd); - struct se_device *dev = SE_DEV(cmd); + struct se_session *se_sess = cmd->se_sess; + struct se_device *dev = cmd->se_lun->lun_se_dev; struct se_dev_entry *se_deve; - struct se_lun *se_lun = SE_LUN(cmd); + struct se_lun *se_lun = cmd->se_lun; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; /* Used for APTPL metadata w/ UNREGISTER */ unsigned char *pr_aptpl_buf = NULL; unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; @@ -2089,9 +2089,9 @@ static int core_scsi3_emulate_pro_register( se_tpg = se_sess->se_tpg; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; - if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { + if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) { memset(&isid_buf[0], 0, PR_REG_ISID_LEN); - TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0], + se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0], PR_REG_ISID_LEN); isid_ptr = &isid_buf[0]; } @@ -2117,7 +2117,7 @@ static int core_scsi3_emulate_pro_register( * Port Endpoint that the PRO was received from on the * Logical Unit of the SCSI device server. */ - ret = core_scsi3_alloc_registration(SE_DEV(cmd), + ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, se_deve, isid_ptr, sa_res_key, all_tg_pt, aptpl, ignore_key, 0); @@ -2145,7 +2145,7 @@ static int core_scsi3_emulate_pro_register( */ if (!(aptpl)) { pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); printk("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER\n"); return 0; @@ -2155,10 +2155,10 @@ static int core_scsi3_emulate_pro_register( * update the APTPL metadata information using its * preallocated *pr_reg->pr_aptpl_buf. */ - pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), + pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, se_sess); - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) { @@ -2223,7 +2223,7 @@ static int core_scsi3_emulate_pro_register( */ if (!(sa_res_key)) { pr_holder = core_scsi3_check_implict_release( - SE_DEV(cmd), pr_reg); + cmd->se_lun->lun_se_dev, pr_reg); if (pr_holder < 0) { kfree(pr_aptpl_buf); core_scsi3_put_pr_reg(pr_reg); @@ -2260,7 +2260,7 @@ static int core_scsi3_emulate_pro_register( /* * Release the calling I_T Nexus registration now.. 
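/*
 * (Sketch; the open flags are illustrative, they sit outside the quoted
 * context.)  The filp_open() error path above still carries the legacy
 * NULL/f_dentry checks, which is why the errno has to be recovered
 * conditionally.  With only the ERR_PTR convention in play the
 * canonical shape is:
 */
file = filp_open(path, O_RDWR | O_CREAT | O_TRUNC, 0600);
if (IS_ERR(file))
	return PTR_ERR(file);	/* already a negative errno */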
*/ - __core_scsi3_free_registration(SE_DEV(cmd), pr_reg, + __core_scsi3_free_registration(cmd->se_lun->lun_se_dev, pr_reg, NULL, 1); /* * From spc4r17, section 5.7.11.3 Unregistering @@ -2315,11 +2315,11 @@ static int core_scsi3_emulate_pro_register( * READ_KEYS service action. */ pr_reg->pr_res_generation = core_scsi3_pr_generation( - SE_DEV(cmd)); + cmd->se_lun->lun_se_dev); pr_reg->pr_res_key = sa_res_key; printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" " Key for %s to: 0x%016Lx PRgeneration:" - " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(), + " 0x%08x\n", cmd->se_tfo->get_fabric_name(), (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", pr_reg->pr_reg_nacl->initiatorname, pr_reg->pr_res_key, pr_reg->pr_res_generation); @@ -2378,12 +2378,12 @@ static int core_scsi3_pro_reserve( int scope, u64 res_key) { - struct se_session *se_sess = SE_SESS(cmd); + struct se_session *se_sess = cmd->se_sess; struct se_dev_entry *se_deve; - struct se_lun *se_lun = SE_LUN(cmd); + struct se_lun *se_lun = cmd->se_lun; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_res_holder; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; char i_buf[PR_REG_ISID_ID_LEN]; int ret, prf_isid; @@ -2398,7 +2398,7 @@ static int core_scsi3_pro_reserve( /* * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -2459,9 +2459,9 @@ static int core_scsi3_pro_reserve( printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" " [%s]: %s while reservation already held by" " [%s]: %s, returning RESERVATION_CONFLICT\n", - CMD_TFO(cmd)->get_fabric_name(), + cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, - TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), pr_res_holder->pr_reg_nacl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -2482,9 +2482,9 @@ static int core_scsi3_pro_reserve( " [%s]: %s trying to change TYPE and/or SCOPE," " while reservation already held by [%s]: %s," " returning RESERVATION_CONFLICT\n", - CMD_TFO(cmd)->get_fabric_name(), + cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, - TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), pr_res_holder->pr_reg_nacl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -2518,16 +2518,16 @@ static int core_scsi3_pro_reserve( printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" " reservation holder TYPE: %s ALL_TG_PT: %d\n", - CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type), + cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", - CMD_TFO(cmd)->get_fabric_name(), + cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, (prf_isid) ? 
&i_buf[0] : ""); spin_unlock(&dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -2608,10 +2608,10 @@ static int core_scsi3_emulate_pro_release( u64 res_key) { struct se_device *dev = cmd->se_dev; - struct se_session *se_sess = SE_SESS(cmd); - struct se_lun *se_lun = SE_LUN(cmd); + struct se_session *se_sess = cmd->se_sess; + struct se_lun *se_lun = cmd->se_lun; struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; int ret, all_reg = 0; if (!(se_sess) || !(se_lun)) { @@ -2698,9 +2698,9 @@ static int core_scsi3_emulate_pro_release( " reservation from [%s]: %s with different TYPE " "and/or SCOPE while reservation already held by" " [%s]: %s, returning RESERVATION_CONFLICT\n", - CMD_TFO(cmd)->get_fabric_name(), + cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, - TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(), + pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(), pr_res_holder->pr_reg_nacl->initiatorname); spin_unlock(&dev->dev_reservation_lock); @@ -2758,7 +2758,7 @@ static int core_scsi3_emulate_pro_release( write_aptpl: if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -2775,15 +2775,15 @@ static int core_scsi3_emulate_pro_clear( { struct se_device *dev = cmd->se_dev; struct se_node_acl *pr_reg_nacl; - struct se_session *se_sess = SE_SESS(cmd); - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct se_session *se_sess = cmd->se_sess; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; u32 pr_res_mapped_lun = 0; int calling_it_nexus = 0; /* * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg_n)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -2846,10 +2846,10 @@ static int core_scsi3_emulate_pro_clear( spin_unlock(&pr_tmpl->registration_lock); printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", - CMD_TFO(cmd)->get_fabric_name()); + cmd->se_tfo->get_fabric_name()); if (pr_tmpl->pr_aptpl_active) { - core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" " for CLEAR\n"); } @@ -2954,13 +2954,13 @@ static int core_scsi3_pro_preempt( u64 sa_res_key, int abort) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; struct se_dev_entry *se_deve; struct se_node_acl *pr_reg_nacl; - struct se_session *se_sess = SE_SESS(cmd); + struct se_session *se_sess = cmd->se_sess; struct list_head preempt_and_abort_list; struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; u32 pr_res_mapped_lun = 0; int all_reg = 0, calling_it_nexus = 0, released_regs = 0; int 
prh_type = 0, prh_scope = 0, ret; @@ -2969,7 +2969,7 @@ static int core_scsi3_pro_preempt( return PYX_TRANSPORT_LU_COMM_FAILURE; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; - pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg_n)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -3111,7 +3111,7 @@ static int core_scsi3_pro_preempt( spin_unlock(&dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3121,7 +3121,7 @@ static int core_scsi3_pro_preempt( } core_scsi3_put_pr_reg(pr_reg_n); - core_scsi3_pr_generation(SE_DEV(cmd)); + core_scsi3_pr_generation(cmd->se_lun->lun_se_dev); return 0; } /* @@ -3247,7 +3247,7 @@ static int core_scsi3_pro_preempt( } if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3256,7 +3256,7 @@ static int core_scsi3_pro_preempt( } core_scsi3_put_pr_reg(pr_reg_n); - core_scsi3_pr_generation(SE_DEV(cmd)); + core_scsi3_pr_generation(cmd->se_lun->lun_se_dev); return 0; } @@ -3297,17 +3297,17 @@ static int core_scsi3_emulate_pro_register_and_move( int aptpl, int unreg) { - struct se_session *se_sess = SE_SESS(cmd); - struct se_device *dev = SE_DEV(cmd); + struct se_session *se_sess = cmd->se_sess; + struct se_device *dev = cmd->se_lun->lun_se_dev; struct se_dev_entry *se_deve, *dest_se_deve = NULL; - struct se_lun *se_lun = SE_LUN(cmd); + struct se_lun *se_lun = cmd->se_lun; struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; struct se_port *se_port; struct se_portal_group *se_tpg, *dest_se_tpg = NULL; struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; + unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; unsigned char *initiator_str; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tid_len, tmp_tid_len; @@ -3322,7 +3322,7 @@ static int core_scsi3_emulate_pro_register_and_move( memset(dest_iport, 0, 64); memset(i_buf, 0, PR_REG_ISID_ID_LEN); se_tpg = se_sess->se_tpg; - tf_ops = TPG_TFO(se_tpg); + tf_ops = se_tpg->se_tpg_tfo; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; /* * Follow logic from spc4r17 Section 5.7.8, Table 50 -- @@ -3330,7 +3330,7 @@ static int core_scsi3_emulate_pro_register_and_move( * * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg)) { printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" @@ -3384,7 +3384,7 @@ static int core_scsi3_emulate_pro_register_and_move( dest_se_tpg = se_port->sep_tpg; if (!(dest_se_tpg)) continue; - dest_tf_ops = TPG_TFO(dest_se_tpg); + dest_tf_ops = dest_se_tpg->se_tpg_tfo; if (!(dest_tf_ops)) continue; @@ -3612,7 +3612,7 @@ after_iport_check: dest_pr_reg = 
__core_scsi3_locate_pr_reg(dev, dest_node_acl, iport_ptr); if (!(dest_pr_reg)) { - ret = core_scsi3_alloc_registration(SE_DEV(cmd), + ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, 0, aptpl, 2, 1); if (ret != 0) { @@ -3683,12 +3683,12 @@ after_iport_check: */ if (!(aptpl)) { pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); printk("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER_AND_MOVE\n"); } else { pr_tmpl->pr_aptpl_active = 1; - ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd), + ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, &dest_pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) */ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) { - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; u64 res_key, sa_res_key; int sa, scope, type, aptpl; int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; @@ -3731,7 +3731,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) * FIXME: A NULL struct se_session pointer means an this is not coming from * a $FABRIC_MOD's nexus, but from internal passthrough ops. */ - if (!(SE_SESS(cmd))) + if (!(cmd->se_sess)) return PYX_TRANSPORT_LU_COMM_FAILURE; if (cmd->data_length < 24) { @@ -3827,10 +3827,10 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) */ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) { - struct se_device *se_dev = SE_DEV(cmd); - struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct se_device *se_dev = cmd->se_lun->lun_se_dev; + struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; u32 add_len = 0, off = 8; if (cmd->data_length < 8) { @@ -3839,13 +3839,13 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) return PYX_TRANSPORT_INVALID_CDB_FIELD; } - buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); - buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); - buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); - buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); + buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); + buf[3] = (su_dev->t10_pr.pr_generation & 0xff); - spin_lock(&T10_RES(su_dev)->registration_lock); - list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list, + spin_lock(&su_dev->t10_pr.registration_lock); + list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list, pr_reg_list) { /* * Check for overflow of 8byte PRI READ_KEYS payload and @@ -3865,7 +3865,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) add_len += 8; } - spin_unlock(&T10_RES(su_dev)->registration_lock); + spin_unlock(&su_dev->t10_pr.registration_lock); buf[4] = ((add_len >> 24) & 0xff); buf[5] = ((add_len >> 16) & 0xff); @@ -3882,10 +3882,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) */ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) { - struct se_device *se_dev = SE_DEV(cmd); - struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct 
se_device *se_dev = cmd->se_lun->lun_se_dev; + struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; u64 pr_res_key; u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ @@ -3895,10 +3895,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) return PYX_TRANSPORT_INVALID_CDB_FIELD; } - buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); - buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); - buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); - buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); + buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); + buf[3] = (su_dev->t10_pr.pr_generation & 0xff); spin_lock(&se_dev->dev_reservation_lock); pr_reg = se_dev->dev_pr_res_holder; @@ -3963,9 +3963,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) */ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); - struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + struct se_device *dev = cmd->se_lun->lun_se_dev; + struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; + unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; u16 add_len = 8; /* Hardcoded to 8. */ if (cmd->data_length < 6) { @@ -4014,13 +4014,13 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) */ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) { - struct se_device *se_dev = SE_DEV(cmd); + struct se_device *se_dev = cmd->se_lun->lun_se_dev; struct se_node_acl *se_nacl; - struct se_subsystem_dev *su_dev = SU_DEV(se_dev); + struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_tmp; - struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation; - unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf; + struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; + unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; u32 off = 8; /* off into first Full Status descriptor */ int format_code = 0; @@ -4031,10 +4031,10 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) return PYX_TRANSPORT_INVALID_CDB_FIELD; } - buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff); - buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff); - buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff); - buf[3] = (T10_RES(su_dev)->pr_generation & 0xff); + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); + buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); + buf[3] = (su_dev->t10_pr.pr_generation & 0xff); spin_lock(&pr_tmpl->registration_lock); list_for_each_entry_safe(pr_reg, pr_reg_tmp, @@ -4051,7 +4051,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) * Determine expected length of $FABRIC_MOD specific * TransportID full status descriptor.. 
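/*
 * (Equivalent sketch.)  The repeated buf[0..3] shift-and-mask stores in
 * the PR-IN handlers above are the open-coded form of a big-endian
 * 32-bit store; with the helper from <asm/unaligned.h> each sequence is
 * one line:
 */
put_unaligned_be32(su_dev->t10_pr.pr_generation, &buf[0]);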
*/ - exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len( + exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len( se_tpg, se_nacl, pr_reg, &format_code); if ((exp_desc_len + add_len) > cmd->data_length) { @@ -4116,7 +4116,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) /* * Now, have the $FABRIC_MOD fill in the protocol identifier */ - desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg, + desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg, se_nacl, pr_reg, &format_code, &buf[off+4]); spin_lock(&pr_tmpl->registration_lock); @@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) int core_scsi3_emulate_pr(struct se_cmd *cmd) { - unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0]; + unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; struct se_device *dev = cmd->se_dev; /* * Following spc2r20 5.5.1 Reservations overview: @@ -4213,39 +4213,39 @@ static int core_pt_seq_non_holder( int core_setup_reservations(struct se_device *dev, int force_pt) { struct se_subsystem_dev *su_dev = dev->se_sub_dev; - struct t10_reservation_template *rest = &su_dev->t10_reservation; + struct t10_reservation *rest = &su_dev->t10_pr; /* * If this device is from Target_Core_Mod/pSCSI, use the reservations * of the Underlying SCSI hardware. In Linux/SCSI terms, this can * cause a problem because libata and some SATA RAID HBAs appear * under Linux/SCSI, but to emulate reservations themselves. */ - if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && - !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) { + if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) && + !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) { rest->res_type = SPC_PASSTHROUGH; rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" - " emulation\n", TRANSPORT(dev)->name); + " emulation\n", dev->transport->name); return 0; } /* * If SPC-3 or above is reported by real or emulated struct se_device, * use emulated Persistent Reservations. 
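/*
 * (Sketch of the dispatch only, not the exact function body; the
 * default branch is assumed.)  With the CDB now read through
 * cmd->t_task->t_task_cdb, core_scsi3_emulate_pr() above keys off the
 * opcode byte using the standard SCSI constants:
 */
switch (cdb[0]) {
case PERSISTENT_RESERVE_IN:
	return core_scsi3_emulate_pr_in(cmd, cdb);
case PERSISTENT_RESERVE_OUT:
	return core_scsi3_emulate_pr_out(cmd, cdb);
default:
	return PYX_TRANSPORT_INVALID_CDB_FIELD;
}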
*/ - if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) { + if (dev->transport->get_device_rev(dev) >= SCSI_3) { rest->res_type = SPC3_PERSISTENT_RESERVATIONS; rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" - " emulation\n", TRANSPORT(dev)->name); + " emulation\n", dev->transport->name); } else { rest->res_type = SPC2_RESERVATIONS; rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_scsi2_reservation_seq_non_holder; printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", - TRANSPORT(dev)->name); + dev->transport->name); } return 0; diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h index 5603bcfd86d3..c8f47d064584 100644 --- a/drivers/target/target_core_pr.h +++ b/drivers/target/target_core_pr.h @@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *, char *, u32); extern int core_scsi2_emulate_crh(struct se_cmd *); extern int core_scsi3_alloc_aptpl_registration( - struct t10_reservation_template *, u64, + struct t10_reservation *, u64, unsigned char *, unsigned char *, u32, unsigned char *, u16, u32, int, int, u8); extern int core_scsi3_check_aptpl_registration(struct se_device *, diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 331d423fd0e0..44a79a5c6d32 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template; static void pscsi_req_done(struct request *, int); -/* pscsi_get_sh(): - * - * - */ -static struct Scsi_Host *pscsi_get_sh(u32 host_no) -{ - struct Scsi_Host *sh = NULL; - - sh = scsi_host_lookup(host_no); - if (IS_ERR(sh)) { - printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:" - " %u\n", host_no); - return NULL; - } - - return sh; -} - /* pscsi_attach_hba(): * * pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host. 
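/*
 * (Idiom taken from the hunks below.)  With the pscsi_get_sh() wrapper
 * deleted above, callers use scsi_host_lookup() directly and keep its
 * ERR_PTR-encoded errno instead of flattening every failure to -1:
 */
sh = scsi_host_lookup(phv->phv_host_id);
if (IS_ERR(sh))
	return PTR_ERR(sh);	/* propagate the real errno */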
@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no) */ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) { - int hba_depth; struct pscsi_hba_virt *phv; phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); if (!(phv)) { printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); - return -1; + return -ENOMEM; } phv->phv_host_id = host_id; phv->phv_mode = PHV_VIRUTAL_HOST_ID; - hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; - atomic_set(&hba->left_queue_depth, hba_depth); - atomic_set(&hba->max_queue_depth, hba_depth); hba->hba_ptr = (void *)phv; printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, PSCSI_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic" - " Target Core with TCQ Depth: %d\n", hba->hba_id, - atomic_read(&hba->max_queue_depth)); + printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n", + hba->hba_id); return 0; } @@ -130,7 +107,6 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) { struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr; struct Scsi_Host *sh = phv->phv_lld_host; - int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH; /* * Release the struct Scsi_Host */ @@ -140,8 +116,6 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) phv->phv_lld_host = NULL; phv->phv_mode = PHV_VIRUTAL_HOST_ID; - atomic_set(&hba->left_queue_depth, hba_depth); - atomic_set(&hba->max_queue_depth, hba_depth); printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" " %s\n", hba->hba_id, (sh->hostt->name) ? @@ -154,22 +128,12 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) * Otherwise, locate struct Scsi_Host from the original passed * pSCSI Host ID and enable for phba mode */ - sh = pscsi_get_sh(phv->phv_host_id); - if (!(sh)) { + sh = scsi_host_lookup(phv->phv_host_id); + if (IS_ERR(sh)) { printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" " phv_host_id: %d\n", phv->phv_host_id); - return -1; + return PTR_ERR(sh); } - /* - * Usually the SCSI LLD will use the hostt->can_queue value to define - * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set - * this at all and set sh->can_queue at runtime. - */ - hba_depth = (sh->hostt->can_queue > sh->can_queue) ? 
- sh->hostt->can_queue : sh->can_queue; - - atomic_set(&hba->left_queue_depth, hba_depth); - atomic_set(&hba->max_queue_depth, hba_depth); phv->phv_lld_host = sh; phv->phv_mode = PHV_LLD_SCSI_HOST_NO; @@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL); if (!buf) - return -1; + return -ENOMEM; memset(cdb, 0, MAX_COMMAND_SIZE); cdb[0] = INQUIRY; @@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) out_free: kfree(buf); - return -1; + return -EPERM; } static void @@ -601,11 +565,11 @@ static struct se_device *pscsi_create_virtdevice( hba->hba_flags |= HBA_FLAGS_PSCSI_MODE; sh = phv->phv_lld_host; } else { - sh = pscsi_get_sh(pdv->pdv_host_id); - if (!(sh)) { + sh = scsi_host_lookup(pdv->pdv_host_id); + if (IS_ERR(sh)) { printk(KERN_ERR "pSCSI: Unable to locate" " pdv_host_id: %d\n", pdv->pdv_host_id); - return ERR_PTR(-ENODEV); + return (struct se_device *) sh; } } } else { @@ -728,13 +692,12 @@ static int pscsi_transport_complete(struct se_task *task) */ if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && (status_byte(result) << 1) == SAM_STAT_GOOD) { - if (!TASK_CMD(task)->se_deve) + if (!task->task_se_cmd->se_deve) goto after_mode_sense; - if (TASK_CMD(task)->se_deve->lun_flags & + if (task->task_se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = (unsigned char *) - T_TASK(task->task_se_cmd)->t_task_buf; + unsigned char *buf = task->task_se_cmd->t_task->t_task_buf; if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -800,7 +763,7 @@ static struct se_task * pscsi_alloc_task(struct se_cmd *cmd) { struct pscsi_plugin_task *pt; - unsigned char *cdb = T_TASK(cmd)->t_task_cdb; + unsigned char *cdb = cmd->t_task->t_task_cdb; pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); if (!pt) { @@ -813,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd) * allocate the extended CDB buffer for per struct se_task context * pt->pscsi_cdb now. */ - if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) { + if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) { pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); if (!(pt->pscsi_cdb)) { @@ -926,7 +889,7 @@ static void pscsi_free_task(struct se_task *task) * Release the extended CDB allocation from pscsi_alloc_task() * if one exists. 
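/*
 * (Sketch; the embedded-array else branch and the __pscsi_cdb[] field
 * are assumed, they sit outside the quoted context.)  A CDB larger than
 * the embedded __t_task_cdb[] array gets its own allocation and
 * t_task_cdb is re-pointed at it, so the pointer comparison just below
 * doubles as the "was this kmalloc'ed?" test.  The allocation side in
 * pscsi_alloc_task(), reduced:
 */
if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) {
	pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
	if (!pt->pscsi_cdb)
		return NULL;
} else
	pt->pscsi_cdb = &pt->__pscsi_cdb[0];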
*/ - if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) + if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) kfree(pt->pscsi_cdb); /* * We do not release the bio(s) here associated with this task, as @@ -1030,7 +993,7 @@ static ssize_t pscsi_check_configfs_dev_params( !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" " scsi_lun_id= parameters\n"); - return -1; + return -EINVAL; } return 0; @@ -1291,7 +1254,7 @@ static int pscsi_map_task_SG(struct se_task *task) */ static int pscsi_map_task_non_SG(struct se_task *task) { - struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; struct pscsi_plugin_task *pt = PSCSI_TASK(task); struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; int ret = 0; @@ -1303,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task) return 0; ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, - pt->pscsi_req, T_TASK(cmd)->t_task_buf, + pt->pscsi_req, cmd->t_task->t_task_buf, task->task_size, GFP_KERNEL); if (ret < 0) { printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); @@ -1400,13 +1363,11 @@ static inline void pscsi_process_SAM_status( pt->pscsi_result); task->task_scsi_status = SAM_STAT_CHECK_CONDITION; task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; - TASK_CMD(task)->transport_error_status = + task->task_se_cmd->transport_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; transport_complete_task(task, 0); break; } - - return; } static void pscsi_req_done(struct request *req, int uptodate) diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index a4cd5d352c3a..280b689379c3 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -2,7 +2,6 @@ #define TARGET_CORE_PSCSI_H #define PSCSI_VERSION "v4.0" -#define PSCSI_VIRTUAL_HBA_DEPTH 2048 /* used in pscsi_find_alloc_len() */ #ifndef INQUIRY_DATA_SIZE diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 7837dd365a9d..fbf06c3994fd 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -66,17 +66,14 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) rd_host->rd_host_id = host_id; - atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH); - atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH); hba->hba_ptr = (void *) rd_host; printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" - " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id, - rd_host->rd_host_id, atomic_read(&hba->max_queue_depth), - RD_MAX_SECTORS); + " MaxSectors: %u\n", hba->hba_id, + rd_host->rd_host_id, RD_MAX_SECTORS); return 0; } @@ -339,7 +336,7 @@ rd_alloc_task(struct se_cmd *cmd) printk(KERN_ERR "Unable to allocate struct rd_request\n"); return NULL; } - rd_req->rd_dev = SE_DEV(cmd)->dev_ptr; + rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr; return &rd_req->rd_task; } @@ -383,7 +380,7 @@ static int rd_MEMCPY_read(struct rd_request *req) table = rd_get_sg_table(dev, req->rd_page); if (!(table)) - return -1; + return -EINVAL; table_sg_end = (table->page_end_offset - req->rd_page); sg_d = task->task_sg; @@ -481,7 +478,7 @@ static int rd_MEMCPY_read(struct rd_request *req) #endif table = rd_get_sg_table(dev, req->rd_page); if (!(table)) - return -1; + return -EINVAL; sg_s = &table->sg_table[j = 0]; } @@ -506,7 +503,7 @@ static 
int rd_MEMCPY_write(struct rd_request *req) table = rd_get_sg_table(dev, req->rd_page); if (!(table)) - return -1; + return -EINVAL; table_sg_end = (table->page_end_offset - req->rd_page); sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; @@ -604,7 +601,7 @@ static int rd_MEMCPY_write(struct rd_request *req) #endif table = rd_get_sg_table(dev, req->rd_page); if (!(table)) - return -1; + return -EINVAL; sg_d = &table->sg_table[j = 0]; } @@ -623,11 +620,11 @@ static int rd_MEMCPY_do_task(struct se_task *task) unsigned long long lba; int ret; - req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE; + req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE; lba = task->task_lba; req->rd_offset = (do_div(lba, - (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) * - DEV_ATTRIB(dev)->block_size; + (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) * + dev->se_sub_dev->se_dev_attrib.block_size; req->rd_size = task->task_size; if (task->task_data_direction == DMA_FROM_DEVICE) @@ -664,7 +661,7 @@ static int rd_DIRECT_with_offset( table = rd_get_sg_table(dev, req->rd_page); if (!(table)) - return -1; + return -EINVAL; table_sg_end = (table->page_end_offset - req->rd_page); sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; @@ -678,7 +675,7 @@ static int rd_DIRECT_with_offset( se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); if (!(se_mem)) { printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&se_mem->se_list); @@ -734,13 +731,13 @@ check_eot: #endif table = rd_get_sg_table(dev, req->rd_page); if (!(table)) - return -1; + return -EINVAL; sg_s = &table->sg_table[j = 0]; } out: - T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; #ifdef DEBUG_RAMDISK_DR printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", *se_mem_cnt); @@ -767,7 +764,7 @@ static int rd_DIRECT_without_offset( table = rd_get_sg_table(dev, req->rd_page); if (!(table)) - return -1; + return -EINVAL; sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; #ifdef DEBUG_RAMDISK_DR @@ -780,7 +777,7 @@ static int rd_DIRECT_without_offset( se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); if (!(se_mem)) { printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&se_mem->se_list); @@ -816,13 +813,13 @@ static int rd_DIRECT_without_offset( #endif table = rd_get_sg_table(dev, req->rd_page); if (!(table)) - return -1; + return -EINVAL; sg_s = &table->sg_table[j = 0]; } out: - T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; #ifdef DEBUG_RAMDISK_DR printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", *se_mem_cnt); @@ -848,13 +845,11 @@ static int rd_DIRECT_do_se_mem_map( u32 task_offset = *task_offset_in; unsigned long long lba; int ret; + int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size; - req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) / - PAGE_SIZE); lba = task->task_lba; - req->rd_offset = (do_div(lba, - (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) * - DEV_ATTRIB(task->se_dev)->block_size; + req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE); + req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size; req->rd_size = task->task_size; if (req->rd_offset) @@ -867,7 +862,7 @@ static int 
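The rd_page/rd_offset arithmetic in rd_MEMCPY_do_task() above maps a logical block address onto the ramdisk's page-sized segments. A standalone sketch of the same math, with plain '/' and '%' standing in for the kernel's do_div() helper (which divides a 64-bit value in place and returns the remainder):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the rd_page/rd_offset computation. */
static void rd_locate(uint64_t lba, uint32_t block_size, uint32_t page_size,
                      uint32_t *page, uint32_t *offset)
{
    uint32_t blocks_per_page = page_size / block_size;

    *page = (uint32_t)((lba * block_size) / page_size);
    *offset = (uint32_t)(lba % blocks_per_page) * block_size;
}

int main(void)
{
    uint32_t page, offset;

    /* 512-byte blocks, 4 KiB pages: 8 blocks per page, so LBA 19
     * sits in page 2 (19 / 8) at offset 3 * 512 = 1536 bytes.
     */
    rd_locate(19, 512, 4096, &page, &offset);
    printf("page %u offset %u\n", page, offset);
    return 0;
}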
rd_DIRECT_do_se_mem_map( if (ret < 0) return ret; - if (CMD_TFO(cmd)->task_sg_chaining == 0) + if (cmd->se_tfo->task_sg_chaining == 0) return 0; /* * Currently prevent writers from multiple HW fabrics doing @@ -876,7 +871,7 @@ static int rd_DIRECT_do_se_mem_map( if (cmd->data_direction == DMA_TO_DEVICE) { printk(KERN_ERR "DMA_TO_DEVICE not supported for" " RAMDISK_DR with task_sg_chaining=1\n"); - return -1; + return -ENOSYS; } /* * Special case for if task_sg_chaining is enabled, then @@ -884,14 +879,15 @@ static int rd_DIRECT_do_se_mem_map( * transport_do_task_sg_chain() for creating chainged SGLs * across multiple struct se_task->task_sg[]. */ - if (!(transport_calc_sg_num(task, - list_entry(T_TASK(cmd)->t_mem_list->next, + ret = transport_init_task_sg(task, + list_entry(cmd->t_task->t_mem_list->next, struct se_mem, se_list), - task_offset))) - return -1; + task_offset); + if (ret <= 0) + return ret; return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - list_entry(T_TASK(cmd)->t_mem_list->next, + list_entry(cmd->t_task->t_mem_list->next, struct se_mem, se_list), out_se_mem, se_mem_cnt, task_offset_in); } @@ -975,7 +971,7 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { printk(KERN_INFO "Missing rd_pages= parameter\n"); - return -1; + return -EINVAL; } return 0; @@ -1021,7 +1017,7 @@ static sector_t rd_get_blocks(struct se_device *dev) { struct rd_dev *rd_dev = dev->dev_ptr; unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) / - DEV_ATTRIB(dev)->block_size) - 1; + dev->se_sub_dev->se_dev_attrib.block_size) - 1; return blocks_long; } diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 3ea19e29d8ec..bab93020a3a9 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -7,8 +7,6 @@ /* Largest piece of memory kmalloc can allocate */ #define RD_MAX_ALLOCATION_SIZE 65536 -/* Maximum queuedepth for the Ramdisk HBA */ -#define RD_HBA_QUEUE_DEPTH 256 #define RD_DEVICE_QUEUE_DEPTH 32 #define RD_MAX_DEVICE_QUEUE_DEPTH 128 #define RD_BLOCKSIZE 512 diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index 5e3a067a7475..a8d6e1dee938 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name( return -ENODEV; /* scsiLuWwnName */ return snprintf(page, PAGE_SIZE, "%s\n", - (strlen(DEV_T10_WWN(dev)->unit_serial)) ? - (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None"); + (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ? + dev->se_sub_dev->t10_wwn.unit_serial : "None"); } DEV_STAT_SCSI_LU_ATTR_RO(lu_name); @@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend( struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_subsystem_dev, dev_stat_grps); struct se_device *dev = se_subdev->se_dev_ptr; - int j; - char str[28]; + int i; + char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1]; if (!dev) return -ENODEV; + /* scsiLuVendorId */ - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - for (j = 0; j < 8; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? - DEV_T10_WWN(dev)->vendor[j] : 0x20; - str[8] = 0; + for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) + str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ? 
+ dev->se_sub_dev->t10_wwn.vendor[i] : ' '; + str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(vend); @@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod( struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_subsystem_dev, dev_stat_grps); struct se_device *dev = se_subdev->se_dev_ptr; - int j; - char str[28]; + int i; + char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1]; if (!dev) return -ENODEV; /* scsiLuProductId */ - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - for (j = 0; j < 16; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? - DEV_T10_WWN(dev)->model[j] : 0x20; - str[16] = 0; + for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++) + str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ? + dev->se_sub_dev->t10_wwn.model[i] : ' '; + str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(prod); @@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev( struct se_subsystem_dev *se_subdev = container_of(sgrps, struct se_subsystem_dev, dev_stat_grps); struct se_device *dev = se_subdev->se_dev_ptr; - int j; - char str[28]; + int i; + char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1]; if (!dev) return -ENODEV; /* scsiLuRevisionId */ - memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); - for (j = 0; j < 4; j++) - str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? - DEV_T10_WWN(dev)->revision[j] : 0x20; - str[4] = 0; + for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++) + str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ? + dev->se_sub_dev->t10_wwn.revision[i] : ' '; + str[i] = '\0'; return snprintf(page, PAGE_SIZE, "%s\n", str); } DEV_STAT_SCSI_LU_ATTR_RO(rev); @@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type( /* scsiLuPeripheralType */ return snprintf(page, PAGE_SIZE, "%u\n", - TRANSPORT(dev)->get_device_type(dev)); + dev->transport->get_device_type(dev)); } DEV_STAT_SCSI_LU_ATTR_RO(dev_type); @@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = { */ void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev) { - struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group; + struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group; - config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group, + config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group, "scsi_dev", &target_stat_scsi_dev_cit); - config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group, + config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group, "scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit); - config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group, + config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group, "scsi_lu", &target_stat_scsi_lu_cit); - dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group; - dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group; - dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group; + dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group; + dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group; + dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group; dev_stat_grp->default_groups[3] = NULL; } @@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name( tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n", - 
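The vendor/model/revision attributes above size their scratch buffer from the field itself and pad unprintable bytes with spaces before NUL-terminating, instead of memcpy()ing a fixed 28 bytes. One caveat visible in the hunk: the scsiLuProductId loop bounds on sizeof(...vendor) while indexing model[], and the pattern is only safe when the bound and the indexed field agree. A userspace sketch of the intended pattern, with isprint() standing in for the kernel's ISPRINT():

#include <ctype.h>
#include <stdio.h>

/* T10 INQUIRY fields are fixed-width, space-padded and not
 * NUL-terminated, so a printable copy needs sizeof(field) + 1.
 */
struct t10_wwn_example {
    char vendor[8];
};

static void format_vendor(const struct t10_wwn_example *wwn, char *out)
{
    size_t i;

    /* out must hold sizeof(wwn->vendor) + 1 bytes */
    for (i = 0; i < sizeof(wwn->vendor); i++)
        out[i] = isprint((unsigned char)wwn->vendor[i]) ?
                 wwn->vendor[i] : ' ';
    out[i] = '\0';
}

int main(void)
{
    struct t10_wwn_example wwn = { "LIO-ORG" };    /* 7 chars, NUL pad */
    char buf[sizeof(wwn.vendor) + 1];

    format_vendor(&wwn, buf);
    printf("'%s'\n", buf);    /* NUL padding is rendered as a space */
    return 0;
}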
TPG_TFO(tpg)->get_fabric_name(), sep->sep_index); + tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index); spin_unlock(&lun->lun_sep_lock); return ret; } @@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index( tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%s%s%d\n", - TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+", + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&lun->lun_sep_lock); return ret; } @@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device( tpg = sep->sep_tpg; /* scsiTransportType */ ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); spin_unlock(&lun->lun_sep_lock); return ret; } @@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx( } tpg = sep->sep_tpg; ret = snprintf(page, PAGE_SIZE, "%u\n", - TPG_TFO(tpg)->tpg_get_inst_index(tpg)); + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock(&lun->lun_sep_lock); return ret; } @@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name( return -ENODEV; } tpg = sep->sep_tpg; - wwn = DEV_T10_WWN(dev); + wwn = &dev->se_sub_dev->t10_wwn; /* scsiTransportDevName */ ret = snprintf(page, PAGE_SIZE, "%s+%s\n", - TPG_TFO(tpg)->tpg_get_wwn(tpg), + tpg->se_tpg_tfo->tpg_get_wwn(tpg), (strlen(wwn->unit_serial)) ? wwn->unit_serial : wwn->vendor); spin_unlock(&lun->lun_sep_lock); @@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = { */ void target_stat_setup_port_default_groups(struct se_lun *lun) { - struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group; + struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group; - config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group, + config_group_init_type_name(&lun->port_stat_grps.scsi_port_group, "scsi_port", &target_stat_scsi_port_cit); - config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group, + config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group, "scsi_tgt_port", &target_stat_scsi_tgt_port_cit); - config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group, + config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group, "scsi_transport", &target_stat_scsi_transport_cit); - port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group; - port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group; - port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group; + port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group; + port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group; + port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group; port_stat_grp->default_groups[3] = NULL; } @@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst( tpg = nacl->se_tpg; /* scsiInstIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", - TPG_TFO(tpg)->tpg_get_inst_index(tpg)); + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } @@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port( } tpg = nacl->se_tpg; /* scsiAuthIntrTgtPortIndex */ - ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } @@ -1632,7 +1630,7 @@ 
static ssize_t target_stat_scsi_att_intr_port_show_attr_inst( tpg = nacl->se_tpg; /* scsiInstIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", - TPG_TFO(tpg)->tpg_get_inst_index(tpg)); + tpg->se_tpg_tfo->tpg_get_inst_index(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } @@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port( } tpg = nacl->se_tpg; /* scsiPortIndex */ - ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg)); + ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_irq(&nacl->device_list_lock); return ret; } @@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx( tpg = nacl->se_tpg; /* scsiAttIntrPortIndex */ ret = snprintf(page, PAGE_SIZE, "%u\n", - TPG_TFO(tpg)->sess_get_index(se_sess)); + tpg->se_tpg_tfo->sess_get_index(se_sess)); spin_unlock_irq(&nacl->nacl_sess_lock); return ret; } @@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident( tpg = nacl->se_tpg; /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */ memset(buf, 0, 64); - if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) - TPG_TFO(tpg)->sess_get_initiator_sid(se_sess, + if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) + tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, (unsigned char *)&buf[0], 64); ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf); @@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = { */ void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl) { - struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group; + struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group; - config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group, + config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group, "scsi_auth_intr", &target_stat_scsi_auth_intr_cit); - config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group, + config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group, "scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit); - ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group; - ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group; + ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group; + ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group; ml_stat_grp->default_groups[2] = NULL; } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 179063d81cdd..2f73749b8151 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -117,7 +117,7 @@ int core_tmr_lun_reset( struct se_queue_req *qr, *qr_tmp; struct se_node_acl *tmr_nacl = NULL; struct se_portal_group *tmr_tpg = NULL; - struct se_queue_obj *qobj = dev->dev_queue_obj; + struct se_queue_obj *qobj = &dev->dev_queue_obj; struct se_tmr_req *tmr_p, *tmr_pp; struct se_task *task, *task_tmp; unsigned long flags; @@ -133,7 +133,7 @@ int core_tmr_lun_reset( * which the command was received shall be completed with TASK ABORTED * status (see SAM-4). */ - tas = DEV_ATTRIB(dev)->emulate_tas; + tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; /* * Determine if this se_tmr is coming from a $FABRIC_MOD * or struct se_device passthrough.. 
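Uses such as &dev->dev_queue_obj in the core_tmr_lun_reset() hunk above reflect dev_queue_obj becoming an embedded member of struct se_device rather than a separately allocated pointer. A minimal sketch of that conversion, with hypothetical example_* types:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Before: struct example_dev held a struct example_qobj * that was
 * kzalloc()'d at setup and kfree()'d at teardown. Embedding the
 * object removes the allocation, its failure path, and the NULL
 * checks at every use site.
 */
struct example_qobj {
    struct list_head qobj_list;
    atomic_t queue_cnt;
    spinlock_t cmd_queue_lock;
};

struct example_dev {
    struct example_qobj dev_queue_obj;    /* embedded, not a pointer */
};

static void example_dev_init(struct example_dev *dev)
{
    struct example_qobj *qobj = &dev->dev_queue_obj;

    INIT_LIST_HEAD(&qobj->qobj_list);
    atomic_set(&qobj->queue_cnt, 0);
    spin_lock_init(&qobj->cmd_queue_lock);
}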
@@ -144,13 +144,13 @@ int core_tmr_lun_reset( if (tmr_nacl && tmr_tpg) { DEBUG_LR("LUN_RESET: TMR caller fabric: %s" " initiator port %s\n", - TPG_TFO(tmr_tpg)->get_fabric_name(), + tmr_tpg->se_tpg_tfo->get_fabric_name(), tmr_nacl->initiatorname); } } DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", (preempt_and_abort_list) ? "Preempt" : "TMR", - TRANSPORT(dev)->name, tas); + dev->transport->name, tas); /* * Release all pending and outgoing TMRs aside from the received * LUN_RESET tmr.. @@ -179,14 +179,14 @@ int core_tmr_lun_reset( continue; spin_unlock(&dev->se_tmr_lock); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + if (!(atomic_read(&cmd->t_task->t_transport_active))) { + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); spin_lock(&dev->se_tmr_lock); continue; } if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); spin_lock(&dev->se_tmr_lock); continue; } @@ -194,7 +194,7 @@ int core_tmr_lun_reset( " Response: 0x%02x, t_state: %d\n", (preempt_and_abort_list) ? "Preempt" : "", tmr_p, tmr_p->function, tmr_p->response, cmd->t_state); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); transport_cmd_finish_abort_tmr(cmd); spin_lock(&dev->se_tmr_lock); @@ -224,16 +224,16 @@ int core_tmr_lun_reset( spin_lock_irqsave(&dev->execute_task_lock, flags); list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, t_state_list) { - if (!(TASK_CMD(task))) { - printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); + if (!task->task_se_cmd) { + printk(KERN_ERR "task->task_se_cmd is NULL!\n"); continue; } - cmd = TASK_CMD(task); + cmd = task->task_se_cmd; - if (!T_TASK(cmd)) { - printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" + if (!cmd->t_task) { + printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:" " %p ITT: 0x%08x\n", task, cmd, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); continue; } /* @@ -254,38 +254,38 @@ int core_tmr_lun_reset( atomic_set(&task->task_state_active, 0); spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" "def_t_state: %d/%d cdb: 0x%02x\n", (preempt_and_abort_list) ? 
"Preempt" : "", cmd, task, - CMD_TFO(cmd)->get_task_tag(cmd), 0, - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, - cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]); + cmd->se_tfo->get_task_tag(cmd), 0, + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, + cmd->deferred_t_state, cmd->t_task->t_task_cdb[0]); DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" " t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", - CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key, - T_TASK(cmd)->t_task_cdbs, - atomic_read(&T_TASK(cmd)->t_task_cdbs_left), - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), - atomic_read(&T_TASK(cmd)->t_transport_active), - atomic_read(&T_TASK(cmd)->t_transport_stop), - atomic_read(&T_TASK(cmd)->t_transport_sent)); + cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, + cmd->t_task->t_task_cdbs, + atomic_read(&cmd->t_task->t_task_cdbs_left), + atomic_read(&cmd->t_task->t_task_cdbs_sent), + atomic_read(&cmd->t_task->t_transport_active), + atomic_read(&cmd->t_task->t_transport_stop), + atomic_read(&cmd->t_task->t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" " for dev: %p\n", task, dev); wait_for_completion(&task->task_stop_comp); DEBUG_LR("LUN_RESET Completed task: %p shutdown for" " dev: %p\n", task, dev); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + atomic_dec(&cmd->t_task->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -295,24 +295,24 @@ int core_tmr_lun_reset( } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { + if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - fe_count = atomic_read(&T_TASK(cmd)->t_fe_count); + fe_count = atomic_read(&cmd->t_task->t_fe_count); - if (atomic_read(&T_TASK(cmd)->t_transport_active)) { + if (atomic_read(&cmd->t_task->t_transport_active)) { DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + atomic_set(&cmd->t_task->t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); @@ -321,8 +321,8 @@ int core_tmr_lun_reset( } DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&cmd->t_task->t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -365,7 +365,7 @@ int core_tmr_lun_reset( if (prout_cmd == cmd) continue; - 
atomic_dec(&T_TASK(cmd)->t_transport_queue_active); + atomic_dec(&cmd->t_task->t_transport_queue_active); atomic_dec(&qobj->queue_cnt); list_del(&qr->qr_list); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); @@ -376,7 +376,7 @@ int core_tmr_lun_reset( DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" " %d t_fe_count: %d\n", (preempt_and_abort_list) ? "Preempt" : "", cmd, state, - atomic_read(&T_TASK(cmd)->t_fe_count)); + atomic_read(&cmd->t_task->t_fe_count)); /* * Signal that the command has failed via cmd->se_cmd_flags, * and call TFO->new_cmd_failure() to wakeup any fabric @@ -388,7 +388,7 @@ int core_tmr_lun_reset( transport_new_cmd_failure(cmd); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, - atomic_read(&T_TASK(cmd)->t_fe_count)); + atomic_read(&cmd->t_task->t_fe_count)); spin_lock_irqsave(&qobj->cmd_queue_lock, flags); } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); @@ -411,6 +411,6 @@ int core_tmr_lun_reset( DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", (preempt_and_abort_list) ? "Preempt" : "TMR", - TRANSPORT(dev)->name); + dev->transport->name); return 0; } diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 5ec745fed931..448129f74cf9 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -44,6 +44,12 @@ #include #include "target_core_hba.h" +#include "target_core_stat.h" + +extern struct se_device *g_lun0_dev; + +static DEFINE_SPINLOCK(tpg_lock); +static LIST_HEAD(tpg_list); /* core_clear_initiator_node_from_tpg(): * @@ -68,7 +74,7 @@ static void core_clear_initiator_node_from_tpg( if (!deve->se_lun) { printk(KERN_ERR "%s device entries device pointer is" " NULL, but Initiator has access.\n", - TPG_TFO(tpg)->get_fabric_name()); + tpg->se_tpg_tfo->get_fabric_name()); continue; } @@ -171,7 +177,7 @@ void core_tpg_add_node_to_devs( * By default in LIO-Target $FABRIC_MOD, * demo_mode_write_protect is ON, or READ_ONLY; */ - if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) { + if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) { if (dev->dev_flags & DF_READ_ONLY) lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; else @@ -181,7 +187,7 @@ void core_tpg_add_node_to_devs( * Allow only optical drives to issue R/W in default RO * demo mode. */ - if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) + if (dev->transport->get_device_type(dev) == TYPE_DISK) lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; else lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; @@ -189,8 +195,8 @@ void core_tpg_add_node_to_devs( printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" " access for LUN in Demo Mode\n", - TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? 
"READ-WRITE" : "READ-ONLY"); @@ -211,7 +217,7 @@ static int core_set_queue_depth_for_node( { if (!acl->queue_depth) { printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," - "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(), + "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); acl->queue_depth = 1; } @@ -233,7 +239,7 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl) if (!(nacl->device_list)) { printk(KERN_ERR "Unable to allocate memory for" " struct se_node_acl->device_list\n"); - return -1; + return -ENOMEM; } for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { deve = &nacl->device_list[i]; @@ -262,10 +268,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( if ((acl)) return acl; - if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg))) + if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))) return NULL; - acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg); + acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); if (!(acl)) return NULL; @@ -274,23 +280,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( spin_lock_init(&acl->device_list_lock); spin_lock_init(&acl->nacl_sess_lock); atomic_set(&acl->acl_pr_ref_count, 0); - acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); + acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg); snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); spin_lock_init(&acl->stats_lock); acl->dynamic_node_acl = 1; - TPG_TFO(tpg)->set_default_node_attributes(acl); + tpg->se_tpg_tfo->set_default_node_attributes(acl); if (core_create_device_list_for_node(acl) < 0) { - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return NULL; } if (core_set_queue_depth_for_node(tpg, acl) < 0) { core_free_device_list_for_node(acl, tpg); - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return NULL; } @@ -302,9 +308,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( spin_unlock_bh(&tpg->acl_node_lock); printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" - " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, - TPG_TFO(tpg)->get_fabric_name(), initiatorname); + " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, + tpg->se_tpg_tfo->get_fabric_name(), initiatorname); return acl; } @@ -355,8 +361,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( if (acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" - " for %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname); + " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); spin_unlock_bh(&tpg->acl_node_lock); /* * Release the locally allocated struct se_node_acl @@ -364,15 +370,15 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( * a pointer to an existing demo mode node ACL. 
*/ if (se_nacl) - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, se_nacl); goto done; } printk(KERN_ERR "ACL entry for %s Initiator" " Node %s already exists for TPG %u, ignoring" - " request.\n", TPG_TFO(tpg)->get_fabric_name(), - initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); + " request.\n", tpg->se_tpg_tfo->get_fabric_name(), + initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_bh(&tpg->acl_node_lock); return ERR_PTR(-EEXIST); } @@ -400,16 +406,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); spin_lock_init(&acl->stats_lock); - TPG_TFO(tpg)->set_default_node_attributes(acl); + tpg->se_tpg_tfo->set_default_node_attributes(acl); if (core_create_device_list_for_node(acl) < 0) { - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return ERR_PTR(-ENOMEM); } if (core_set_queue_depth_for_node(tpg, acl) < 0) { core_free_device_list_for_node(acl, tpg); - TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); + tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl); return ERR_PTR(-EINVAL); } @@ -420,9 +426,9 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( done: printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" - " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, - TPG_TFO(tpg)->get_fabric_name(), initiatorname); + " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, + tpg->se_tpg_tfo->get_fabric_name(), initiatorname); return acl; } @@ -457,7 +463,7 @@ int core_tpg_del_initiator_node_acl( /* * Determine if the session needs to be closed by our context. */ - if (!(TPG_TFO(tpg)->shutdown_session(sess))) + if (!(tpg->se_tpg_tfo->shutdown_session(sess))) continue; spin_unlock_bh(&tpg->session_lock); @@ -465,7 +471,7 @@ int core_tpg_del_initiator_node_acl( * If the $FABRIC_MOD session for the Initiator Node ACL exists, * forcefully shutdown the $FABRIC_MOD session/nexus. */ - TPG_TFO(tpg)->close_session(sess); + tpg->se_tpg_tfo->close_session(sess); spin_lock_bh(&tpg->session_lock); } @@ -476,9 +482,9 @@ int core_tpg_del_initiator_node_acl( core_free_device_list_for_node(acl, tpg); printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" - " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, - TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname); + " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, + tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); return 0; } @@ -503,8 +509,8 @@ int core_tpg_set_initiator_node_queue_depth( if (!(acl)) { printk(KERN_ERR "Access Control List entry for %s Initiator" " Node %s does not exists for TPG %hu, ignoring" - " request.\n", TPG_TFO(tpg)->get_fabric_name(), - initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); + " request.\n", tpg->se_tpg_tfo->get_fabric_name(), + initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock_bh(&tpg->acl_node_lock); return -ENODEV; } @@ -525,7 +531,7 @@ int core_tpg_set_initiator_node_queue_depth( " operational. 
To forcefully change the queue" " depth and force session reinstatement" " use the \"force=1\" parameter.\n", - TPG_TFO(tpg)->get_fabric_name(), initiatorname); + tpg->se_tpg_tfo->get_fabric_name(), initiatorname); spin_unlock_bh(&tpg->session_lock); spin_lock_bh(&tpg->acl_node_lock); @@ -537,7 +543,7 @@ int core_tpg_set_initiator_node_queue_depth( /* * Determine if the session needs to be closed by our context. */ - if (!(TPG_TFO(tpg)->shutdown_session(sess))) + if (!(tpg->se_tpg_tfo->shutdown_session(sess))) continue; init_sess = sess; @@ -549,7 +555,7 @@ int core_tpg_set_initiator_node_queue_depth( * Change the value in the Node's struct se_node_acl, and call * core_set_queue_depth_for_node() to add the requested queue depth. * - * Finally call TPG_TFO(tpg)->close_session() to force session + * Finally call tpg->se_tpg_tfo->close_session() to force session * reinstatement to occur if there is an active session for the * $FABRIC_MOD Initiator Node in question. */ @@ -561,10 +567,10 @@ int core_tpg_set_initiator_node_queue_depth( * Force session reinstatement if * core_set_queue_depth_for_node() failed, because we assume * the $FABRIC_MOD has already the set session reinstatement - * bit from TPG_TFO(tpg)->shutdown_session() called above. + * bit from tpg->se_tpg_tfo->shutdown_session() called above. */ if (init_sess) - TPG_TFO(tpg)->close_session(init_sess); + tpg->se_tpg_tfo->close_session(init_sess); spin_lock_bh(&tpg->acl_node_lock); if (dynamic_acl) @@ -578,12 +584,12 @@ int core_tpg_set_initiator_node_queue_depth( * forcefully shutdown the $FABRIC_MOD session/nexus. */ if (init_sess) - TPG_TFO(tpg)->close_session(init_sess); + tpg->se_tpg_tfo->close_session(init_sess); printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" " Node: %s on %s Target Portal Group: %u\n", queue_depth, - initiatorname, TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg)); + initiatorname, tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_lock_bh(&tpg->acl_node_lock); if (dynamic_acl) @@ -597,7 +603,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth); static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) { /* Set in core_dev_setup_virtual_lun0() */ - struct se_device *dev = se_global->g_lun0_dev; + struct se_device *dev = g_lun0_dev; struct se_lun *lun = &se_tpg->tpg_virt_lun0; u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; int ret; @@ -614,7 +620,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); if (ret < 0) - return -1; + return ret; return 0; } @@ -663,7 +669,7 @@ int core_tpg_register( se_tpg->se_tpg_wwn = se_wwn; atomic_set(&se_tpg->tpg_pr_ref_count, 0); INIT_LIST_HEAD(&se_tpg->acl_node_list); - INIT_LIST_HEAD(&se_tpg->se_tpg_list); + INIT_LIST_HEAD(&se_tpg->se_tpg_node); INIT_LIST_HEAD(&se_tpg->tpg_sess_list); spin_lock_init(&se_tpg->acl_node_lock); spin_lock_init(&se_tpg->session_lock); @@ -676,9 +682,9 @@ int core_tpg_register( } } - spin_lock_bh(&se_global->se_tpg_lock); - list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list); - spin_unlock_bh(&se_global->se_tpg_lock); + spin_lock_bh(&tpg_lock); + list_add_tail(&se_tpg->se_tpg_node, &tpg_list); + spin_unlock_bh(&tpg_lock); printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), @@ -697,13 +703,13 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) printk(KERN_INFO "TARGET_CORE[%s]: 
Deallocating %s struct se_portal_group" " for endpoint: %s Portal Tag %u\n", (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? - "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(), - TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg), - TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); + "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), + se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg), + se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); - spin_lock_bh(&se_global->se_tpg_lock); - list_del(&se_tpg->se_tpg_list); - spin_unlock_bh(&se_global->se_tpg_lock); + spin_lock_bh(&tpg_lock); + list_del(&se_tpg->se_tpg_node); + spin_unlock_bh(&tpg_lock); while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) cpu_relax(); @@ -721,7 +727,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) core_tpg_wait_for_nacl_pr_ref(nacl); core_free_device_list_for_node(nacl, se_tpg); - TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); + se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); spin_lock_bh(&se_tpg->acl_node_lock); } @@ -745,9 +751,9 @@ struct se_lun *core_tpg_pre_addlun( if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" "-1: %u for Target Portal Group: %u\n", - TPG_TFO(tpg)->get_fabric_name(), + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); return ERR_PTR(-EOVERFLOW); } @@ -756,8 +762,8 @@ struct se_lun *core_tpg_pre_addlun( if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { printk(KERN_ERR "TPG Logical Unit Number: %u is already active" " on %s Target Portal Group: %u, ignoring request.\n", - unpacked_lun, TPG_TFO(tpg)->get_fabric_name(), - TPG_TFO(tpg)->tpg_get_tag(tpg)); + unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return ERR_PTR(-EINVAL); } @@ -772,8 +778,11 @@ int core_tpg_post_addlun( u32 lun_access, void *lun_ptr) { - if (core_dev_export(lun_ptr, tpg, lun) < 0) - return -1; + int ret; + + ret = core_dev_export(lun_ptr, tpg, lun); + if (ret < 0) + return ret; spin_lock(&tpg->tpg_lun_lock); lun->lun_access = lun_access; @@ -801,9 +810,9 @@ struct se_lun *core_tpg_pre_dellun( if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" "-1: %u for Target Portal Group: %u\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->tpg_get_tag(tpg)); return ERR_PTR(-EOVERFLOW); } @@ -812,8 +821,8 @@ struct se_lun *core_tpg_pre_dellun( if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { printk(KERN_ERR "%s Logical Unit Number: %u is not active on" " Target Portal Group: %u, ignoring request.\n", - TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, - TPG_TFO(tpg)->tpg_get_tag(tpg)); + tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, + tpg->se_tpg_tfo->tpg_get_tag(tpg)); spin_unlock(&tpg->tpg_lun_lock); return ERR_PTR(-ENODEV); } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 188225161a7e..e4406c9e66e0 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -184,7 +184,7 @@ #define DEBUG_STA(x...) 
#endif -struct se_global *se_global; +static int sub_api_initialized; static struct kmem_cache *se_cmd_cache; static struct kmem_cache *se_sess_cache; @@ -227,26 +227,8 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); static void transport_stop_all_task_timers(struct se_cmd *cmd); -int init_se_global(void) +int init_se_kmem_caches(void) { - struct se_global *global; - - global = kzalloc(sizeof(struct se_global), GFP_KERNEL); - if (!(global)) { - printk(KERN_ERR "Unable to allocate memory for struct se_global\n"); - return -1; - } - - INIT_LIST_HEAD(&global->g_lu_gps_list); - INIT_LIST_HEAD(&global->g_se_tpg_list); - INIT_LIST_HEAD(&global->g_hba_list); - INIT_LIST_HEAD(&global->g_se_dev_list); - spin_lock_init(&global->g_device_lock); - spin_lock_init(&global->hba_lock); - spin_lock_init(&global->se_tpg_lock); - spin_lock_init(&global->lu_gps_lock); - spin_lock_init(&global->plugin_class_lock); - se_cmd_cache = kmem_cache_create("se_cmd_cache", sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); if (!(se_cmd_cache)) { @@ -325,8 +307,6 @@ int init_se_global(void) goto out; } - se_global = global; - return 0; out: if (se_cmd_cache) @@ -349,18 +329,11 @@ out: kmem_cache_destroy(t10_alua_tg_pt_gp_cache); if (t10_alua_tg_pt_gp_mem_cache) kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); - kfree(global); - return -1; + return -ENOMEM; } -void release_se_global(void) +void release_se_kmem_caches(void) { - struct se_global *global; - - global = se_global; - if (!(global)) - return; - kmem_cache_destroy(se_cmd_cache); kmem_cache_destroy(se_tmr_req_cache); kmem_cache_destroy(se_sess_cache); @@ -371,23 +344,11 @@ void release_se_global(void) kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); - kfree(global); - - se_global = NULL; } -/* SCSI statistics table index */ -static struct scsi_index_table scsi_index_table; - -/* - * Initialize the index table for allocating unique row indexes to various mib - * tables. - */ -void init_scsi_index_table(void) -{ - memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); - spin_lock_init(&scsi_index_table.lock); -} +/* This code ensures unique mib indexes are handed out. */ +static DEFINE_SPINLOCK(scsi_mib_index_lock); +static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; /* * Allocate a new row index for the entry type specified @@ -396,16 +357,11 @@ u32 scsi_get_new_index(scsi_index_t type) { u32 new_index; - if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { - printk(KERN_ERR "Invalid index type %d\n", type); - return -EINVAL; - } + BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); - spin_lock(&scsi_index_table.lock); - new_index = ++scsi_index_table.scsi_mib_index[type]; - if (new_index == 0) - new_index = ++scsi_index_table.scsi_mib_index[type]; - spin_unlock(&scsi_index_table.lock); + spin_lock(&scsi_mib_index_lock); + new_index = ++scsi_mib_index[type]; + spin_unlock(&scsi_mib_index_lock); return new_index; } @@ -444,15 +400,18 @@ static int transport_subsystem_reqmods(void) int transport_subsystem_check_init(void) { - if (se_global->g_sub_api_initialized) + int ret; + + if (sub_api_initialized) return 0; /* * Request the loading of known TCM subsystem plugins.. 
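scsi_get_new_index() above trades the kzalloc()'d index table inside struct se_global for file-scope statics. A compact sketch of that pattern, with illustrative names only:

#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum example_index_t {
    EXAMPLE_INST_INDEX,
    EXAMPLE_AUTH_INTR_INDEX,
    EXAMPLE_INDEX_MAX
};

/* File-scope statics replace a kzalloc()'d singleton: no init
 * function, no failure path, and the lock is valid from link time.
 */
static DEFINE_SPINLOCK(example_index_lock);
static u32 example_index[EXAMPLE_INDEX_MAX];

static u32 example_get_new_index(enum example_index_t type)
{
    u32 new_index;

    BUG_ON(type >= EXAMPLE_INDEX_MAX);    /* programmer error, not input */

    spin_lock(&example_index_lock);
    new_index = ++example_index[type];
    spin_unlock(&example_index_lock);
    return new_index;
}

The BUG_ON() mirrors the "make two runtime checks into WARN_ONs/BUG_ONs" theme of the series: an out-of-range type is a caller bug, not an input condition worth a printk-and-return path.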
*/ - if (transport_subsystem_reqmods() < 0) - return -1; + ret = transport_subsystem_reqmods(); + if (ret < 0) + return ret; - se_global->g_sub_api_initialized = 1; + sub_api_initialized = 1; return 0; } @@ -497,9 +456,9 @@ void __transport_register_session( * If the fabric module supports an ISID based TransportID, * save this value in binary from the fabric I_T Nexus now. */ - if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) { + if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { memset(&buf[0], 0, PR_REG_ISID_LEN); - TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, + se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &buf[0], PR_REG_ISID_LEN); se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); } @@ -517,7 +476,7 @@ void __transport_register_session( list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", - TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr); + se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); } EXPORT_SYMBOL(__transport_register_session); @@ -591,7 +550,7 @@ void transport_deregister_session(struct se_session *se_sess) if ((se_nacl)) { spin_lock_bh(&se_tpg->acl_node_lock); if (se_nacl->dynamic_node_acl) { - if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache( + if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( se_tpg))) { list_del(&se_nacl->acl_list); se_tpg->num_node_acls--; @@ -599,7 +558,7 @@ void transport_deregister_session(struct se_session *se_sess) core_tpg_wait_for_nacl_pr_ref(se_nacl); core_free_device_list_for_node(se_nacl, se_tpg); - TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, + se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, se_nacl); spin_lock_bh(&se_tpg->acl_node_lock); } @@ -610,12 +569,12 @@ void transport_deregister_session(struct se_session *se_sess) transport_free_session(se_sess); printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", - TPG_TFO(se_tpg)->get_fabric_name()); + se_tpg->se_tpg_tfo->get_fabric_name()); } EXPORT_SYMBOL(transport_deregister_session); /* - * Called with T_TASK(cmd)->t_state_lock held. + * Called with cmd->t_task->t_state_lock held. */ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) { @@ -623,10 +582,10 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - if (!T_TASK(cmd)) + if (!cmd->t_task) return; - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { dev = task->se_dev; if (!(dev)) continue; @@ -640,11 +599,11 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) spin_lock_irqsave(&dev->execute_task_lock, flags); list_del(&task->t_state_list); DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", - CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task); + cmd->se_tfo->tfo_get_task_tag(cmd), dev, task); spin_unlock_irqrestore(&dev->execute_task_lock, flags); atomic_set(&task->task_state_active, 0); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left); + atomic_dec(&cmd->t_task->t_task_cdbs_ex_left); } } @@ -663,34 +622,34 @@ static int transport_cmd_check_stop( { unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); /* * Determine if IOCTL context caller in requesting the stopping of this * command for LUN shutdown purposes. 
*/ - if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { - DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)" + if (atomic_read(&cmd->t_task->transport_lun_stop)) { + DEBUG_CS("%s:%d atomic_read(&cmd->t_task->transport_lun_stop)" " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; - atomic_set(&T_TASK(cmd)->t_transport_active, 0); + atomic_set(&cmd->t_task->t_transport_active, 0); if (transport_off == 2) transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); - complete(&T_TASK(cmd)->transport_lun_stop_comp); + complete(&cmd->t_task->transport_lun_stop_comp); return 1; } /* * Determine if frontend context caller is requesting the stopping of - * this command for frontend excpections. + * this command for frontend exceptions. */ - if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { - DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) ==" + if (atomic_read(&cmd->t_task->t_transport_stop)) { + DEBUG_CS("%s:%d atomic_read(&cmd->t_task->t_transport_stop) ==" " TRUE for ITT: 0x%08x\n", __func__, __LINE__, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; @@ -703,13 +662,13 @@ static int transport_cmd_check_stop( */ if (transport_off == 2) cmd->se_lun = NULL; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); - complete(&T_TASK(cmd)->t_transport_stop_comp); + complete(&cmd->t_task->t_transport_stop_comp); return 1; } if (transport_off) { - atomic_set(&T_TASK(cmd)->t_transport_active, 0); + atomic_set(&cmd->t_task->t_transport_active, 0); if (transport_off == 2) { transport_all_task_dev_remove_state(cmd); /* @@ -722,20 +681,20 @@ static int transport_cmd_check_stop( * their internally allocated I/O reference now and * struct se_cmd now. 
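transport_cmd_check_stop() above pairs stop flags with completions so a stopping context can wait until the command path acknowledges the request. A reduced sketch of that handshake, with states and reference counting trimmed to the essentials and hypothetical example_* names:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/spinlock.h>

struct example_cmd {
    spinlock_t state_lock;
    atomic_t stop_requested;
    struct completion stop_comp;
};

static void example_cmd_init(struct example_cmd *cmd)
{
    spin_lock_init(&cmd->state_lock);
    atomic_set(&cmd->stop_requested, 0);
    init_completion(&cmd->stop_comp);
}

/* Command path: observe the stop request under the state lock and
 * signal the waiter before backing out of further processing.
 */
static int example_check_stop(struct example_cmd *cmd)
{
    unsigned long flags;

    spin_lock_irqsave(&cmd->state_lock, flags);
    if (atomic_read(&cmd->stop_requested)) {
        spin_unlock_irqrestore(&cmd->state_lock, flags);
        complete(&cmd->stop_comp);
        return 1;    /* caller must stop handling this command */
    }
    spin_unlock_irqrestore(&cmd->state_lock, flags);
    return 0;
}

/* Stopping path: raise the flag, then block until acknowledged. */
static void example_request_stop(struct example_cmd *cmd)
{
    atomic_set(&cmd->stop_requested, 1);
    wait_for_completion(&cmd->stop_comp);
}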
*/ - if (CMD_TFO(cmd)->check_stop_free != NULL) { + if (cmd->se_tfo->check_stop_free != NULL) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); - CMD_TFO(cmd)->check_stop_free(cmd); + cmd->se_tfo->check_stop_free(cmd); return 1; } } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); return 0; } else if (t_state) cmd->t_state = t_state; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); return 0; } @@ -747,30 +706,30 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) static void transport_lun_remove_cmd(struct se_cmd *cmd) { - struct se_lun *lun = SE_LUN(cmd); + struct se_lun *lun = cmd->se_lun; unsigned long flags; if (!lun) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + if (!(atomic_read(&cmd->t_task->transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); goto check_lun; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->t_task->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); check_lun: spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (atomic_read(&T_TASK(cmd)->transport_lun_active)) { + if (atomic_read(&cmd->t_task->transport_lun_active)) { list_del(&cmd->se_lun_list); - atomic_set(&T_TASK(cmd)->transport_lun_active, 0); + atomic_set(&cmd->t_task->transport_lun_active, 0); #if 0 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" - CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun); + cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); #endif } spin_unlock_irqrestore(&lun->lun_cmd_lock, flags); @@ -778,7 +737,7 @@ check_lun: void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) @@ -789,7 +748,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) { - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); if (transport_cmd_check_stop_to_fabric(cmd)) return; @@ -802,7 +761,7 @@ static int transport_add_cmd_to_queue( int t_state) { struct se_device *dev = cmd->se_dev; - struct se_queue_obj *qobj = dev->dev_queue_obj; + struct se_queue_obj *qobj = &dev->dev_queue_obj; struct se_queue_req *qr; unsigned long flags; @@ -810,23 +769,23 @@ static int transport_add_cmd_to_queue( if (!(qr)) { printk(KERN_ERR "Unable to allocate memory for" " struct se_queue_req\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&qr->qr_list); - qr->cmd = (void *)cmd; + qr->cmd = cmd; qr->state = t_state; if (t_state) { - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); cmd->t_state = t_state; - atomic_set(&T_TASK(cmd)->t_transport_active, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&cmd->t_task->t_transport_active, 1); + 
spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); list_add_tail(&qr->qr_list, &qobj->qobj_list); - atomic_inc(&T_TASK(cmd)->t_transport_queue_active); + atomic_inc(&cmd->t_task->t_transport_queue_active); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); atomic_inc(&qobj->queue_cnt); @@ -837,32 +796,9 @@ static int transport_add_cmd_to_queue( /* * Called with struct se_queue_obj->cmd_queue_lock held. */ -static struct se_queue_req * -__transport_get_qr_from_queue(struct se_queue_obj *qobj) -{ - struct se_cmd *cmd; - struct se_queue_req *qr = NULL; - - if (list_empty(&qobj->qobj_list)) - return NULL; - - list_for_each_entry(qr, &qobj->qobj_list, qr_list) - break; - - if (qr->cmd) { - cmd = (struct se_cmd *)qr->cmd; - atomic_dec(&T_TASK(cmd)->t_transport_queue_active); - } - list_del(&qr->qr_list); - atomic_dec(&qobj->queue_cnt); - - return qr; -} - static struct se_queue_req * transport_get_qr_from_queue(struct se_queue_obj *qobj) { - struct se_cmd *cmd; struct se_queue_req *qr; unsigned long flags; @@ -875,10 +811,9 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) list_for_each_entry(qr, &qobj->qobj_list, qr_list) break; - if (qr->cmd) { - cmd = (struct se_cmd *)qr->cmd; - atomic_dec(&T_TASK(cmd)->t_transport_queue_active); - } + if (qr->cmd) + atomic_dec(&qr->cmd->t_task->t_transport_queue_active); + list_del(&qr->qr_list); atomic_dec(&qobj->queue_cnt); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); @@ -889,32 +824,30 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj) { - struct se_cmd *q_cmd; struct se_queue_req *qr = NULL, *qr_p = NULL; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) { + if (!(atomic_read(&cmd->t_task->t_transport_queue_active))) { spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { - q_cmd = (struct se_cmd *)qr->cmd; - if (q_cmd != cmd) + if (qr->cmd != cmd) continue; - atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active); + atomic_dec(&qr->cmd->t_task->t_transport_queue_active); atomic_dec(&qobj->queue_cnt); list_del(&qr->qr_list); kfree(qr); } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) { + if (atomic_read(&cmd->t_task->t_transport_queue_active)) { printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", - CMD_TFO(cmd)->get_task_tag(cmd), - atomic_read(&T_TASK(cmd)->t_transport_queue_active)); + cmd->se_tfo->get_task_tag(cmd), + atomic_read(&cmd->t_task->t_transport_queue_active)); } } @@ -924,7 +857,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, */ void transport_complete_sync_cache(struct se_cmd *cmd, int good) { - struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next, + struct se_task *task = list_entry(cmd->t_task->t_task_list.next, struct se_task, t_list); if (good) { @@ -933,7 +866,7 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good) } else { task->task_scsi_status = SAM_STAT_CHECK_CONDITION; task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; - TASK_CMD(task)->transport_error_status = + task->task_se_cmd->transport_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST; } @@ -948,22 +881,18 @@ EXPORT_SYMBOL(transport_complete_sync_cache); */ void transport_complete_task(struct se_task *task, int success) { - 
struct se_cmd *cmd = TASK_CMD(task); + struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = task->se_dev; int t_state; unsigned long flags; #if 0 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, - T_TASK(cmd)->t_task_cdb[0], dev); + cmd->t_task->t_task_cdb[0], dev); #endif - if (dev) { - spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags); + if (dev) atomic_inc(&dev->depth_left); - atomic_inc(&SE_HBA(dev)->left_queue_depth); - spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags); - } - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); atomic_set(&task->task_active, 0); /* @@ -985,14 +914,14 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_stop)) { /* - * Decrement T_TASK(cmd)->t_se_count if this task had + * Decrement cmd->t_task->t_se_count if this task had * previously thrown its timeout exception handler. */ if (atomic_read(&task->task_timeout)) { - atomic_dec(&T_TASK(cmd)->t_se_count); + atomic_dec(&cmd->t_task->t_se_count); atomic_set(&task->task_timeout, 0); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); complete(&task->task_stop_comp); return; @@ -1004,33 +933,33 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_timeout)) { if (!(atomic_dec_and_test( - &T_TASK(cmd)->t_task_cdbs_timeout_left))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + &cmd->t_task->t_task_cdbs_timeout_left))) { + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); return; } t_state = TRANSPORT_COMPLETE_TIMEOUT; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); return; } - atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left); + atomic_dec(&cmd->t_task->t_task_cdbs_timeout_left); /* * Decrement the outstanding t_task_cdbs_left count. The last * struct se_task from struct se_cmd will complete itself into the * device queue depending upon int success. 
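The t_task_cdbs_left handling that follows is the usual "last subtask completes the command" idiom. A minimal sketch, with the state lock elided and hypothetical names:

#include <linux/atomic.h>

struct example_cmd {
    atomic_t tasks_left;
    int tasks_failed;
};

/* Stub standing in for handing the command to the next stage. */
static void example_queue_completion(struct example_cmd *cmd, int failed)
{
}

/* Called once per finished subtask; only the caller that drops the
 * counter to zero moves the parent command forward. The real code
 * does this under t_state_lock, which is elided here.
 */
static void example_task_done(struct example_cmd *cmd, int success)
{
    if (!success)
        cmd->tasks_failed = 1;    /* sticky across subtasks */

    if (!atomic_dec_and_test(&cmd->tasks_left))
        return;    /* other subtasks still in flight */

    example_queue_completion(cmd, cmd->tasks_failed);
}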
	 */
-	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+	if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) {
		if (!success)
-			T_TASK(cmd)->t_tasks_failed = 1;
+			cmd->t_task->t_tasks_failed = 1;

-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
		return;
	}

-	if (!success || T_TASK(cmd)->t_tasks_failed) {
+	if (!success || cmd->t_task->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
@@ -1039,10 +968,10 @@ void transport_complete_task(struct se_task *task, int success)
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
-		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+		atomic_set(&cmd->t_task->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
 }
@@ -1125,7 +1054,7 @@ static void __transport_add_task_to_execute_queue(
	atomic_set(&task->task_state_active, 1);

	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
-		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
		task, dev);
 }
@@ -1135,8 +1064,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
	struct se_task *task;
	unsigned long flags;

-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
-	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
@@ -1147,22 +1076,22 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
		atomic_set(&task->task_state_active, 1);

		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
-			CMD_TFO(task->task_se_cmd)->get_task_tag(
+			task->task_se_cmd->se_tfo->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
 }

 static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
 {
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_task *task, *task_prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+	list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) {
		if (atomic_read(&task->task_execute_queue))
			continue;
		/*
@@ -1174,30 +1103,6 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
		task_prev = task;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
-	return;
-}
-
-/* transport_get_task_from_execute_queue():
- *
- * Called with dev->execute_task_lock held.
- */
-static struct se_task *
-transport_get_task_from_execute_queue(struct se_device *dev)
-{
-	struct se_task *task;
-
-	if (list_empty(&dev->execute_task_list))
-		return NULL;
-
-	list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
-		break;
-
-	list_del(&task->t_execute_list);
-	atomic_set(&task->task_execute_queue, 0);
-	atomic_dec(&dev->execute_tasks);
-
-	return task;
 }

 /* transport_remove_task_from_execute_queue():
@@ -1269,7 +1174,7 @@ void transport_dump_dev_state(
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
-		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+		dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
	*bl += sprintf(b + *bl, "        ");
 }
@@ -1284,28 +1189,28 @@ static void transport_release_all_cmds(struct se_device *dev)
	int bug_out = 0, t_state;
	unsigned long flags;

-	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
-	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
+	spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
+	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj.qobj_list,
			qr_list) {
-		cmd = (struct se_cmd *)qr->cmd;
+		cmd = qr->cmd;
		t_state = qr->state;
		list_del(&qr->qr_list);
		kfree(qr);
-		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
				flags);

		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
			" t_state: %u directly\n",
-			CMD_TFO(cmd)->get_task_tag(cmd),
-			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+			cmd->se_tfo->get_task_tag(cmd),
+			cmd->se_tfo->get_cmd_state(cmd), t_state);

		transport_release_fe_cmd(cmd);
		bug_out = 1;

-		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+		spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
	}
-	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+	spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
#if 0
	if (bug_out)
		BUG();
@@ -1387,7 +1292,8 @@ int transport_dump_vpd_assoc(
	int p_buf_len)
 {
	unsigned char buf[VPD_TMP_BUF_SIZE];
-	int ret = 0, len;
+	int ret = 0;
+	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");
@@ -1404,7 +1310,7 @@ int transport_dump_vpd_assoc(
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
-		ret = -1;
+		ret = -EINVAL;
		break;
	}

@@ -1434,7 +1340,8 @@ int transport_dump_vpd_ident_type(
	int p_buf_len)
 {
	unsigned char buf[VPD_TMP_BUF_SIZE];
-	int ret = 0, len;
+	int ret = 0;
+	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");
@@ -1461,14 +1368,17 @@ int transport_dump_vpd_ident_type(
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
-		ret = -1;
+		ret = -EINVAL;
		break;
	}

-	if (p_buf)
+	if (p_buf) {
+		if (p_buf_len < strlen(buf)+1)
+			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
-	else
+	} else {
		printk("%s", buf);
+	}

	return ret;
 }
@@ -1511,7 +1421,7 @@ int transport_dump_vpd_ident(
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
-		ret = -1;
+		ret = -EINVAL;
		break;
	}

@@ -1569,20 +1479,20 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
-	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
-		" device\n", TRANSPORT(dev)->name,
-		TRANSPORT(dev)->get_device_rev(dev));
+		" device\n", dev->transport->name,
+		dev->transport->get_device_rev(dev));
 }

 static void scsi_dump_inquiry(struct se_device *dev)
 {
-	struct t10_wwn *wwn = DEV_T10_WWN(dev);
+	struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
@@ -1610,10 +1520,10 @@ static void scsi_dump_inquiry(struct se_device *dev)

	printk("\n");

-	device_type = TRANSPORT(dev)->get_device_type(dev);
+	device_type = dev->transport->get_device_type(dev);
	printk("  Type:   %s ", scsi_device_type(device_type));
	printk("                 ANSI SCSI revision: %02x\n",
-		TRANSPORT(dev)->get_device_rev(dev));
+		dev->transport->get_device_rev(dev));
 }

 struct se_device *transport_add_device_to_core_hba(
@@ -1634,26 +1544,8 @@ struct se_device *transport_add_device_to_core_hba(
		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
		return NULL;
	}
-	dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
-	if (!(dev->dev_queue_obj)) {
-		printk(KERN_ERR "Unable to allocate memory for"
				" dev->dev_queue_obj\n");
-		kfree(dev);
-		return NULL;
-	}
-	transport_init_queue_obj(dev->dev_queue_obj);
-
-	dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
			GFP_KERNEL);
-	if (!(dev->dev_status_queue_obj)) {
-		printk(KERN_ERR "Unable to allocate memory for"
				" dev->dev_status_queue_obj\n");
-		kfree(dev->dev_queue_obj);
-		kfree(dev);
-		return NULL;
-	}
-	transport_init_queue_obj(dev->dev_status_queue_obj);
+	transport_init_queue_obj(&dev->dev_queue_obj);

	dev->dev_flags = device_flags;
	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr = (void *) transport_dev;
@@ -1715,10 +1607,10 @@ struct se_device *transport_add_device_to_core_hba(
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
-			"LIO_%s", TRANSPORT(dev)->name);
+			"LIO_%s", dev->transport->name);
	if (IS_ERR(dev->process_thread)) {
		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
-			TRANSPORT(dev)->name);
+			dev->transport->name);
		goto out;
	}

@@ -1730,16 +1622,16 @@ struct se_device *transport_add_device_to_core_hba(
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
-	if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!(inquiry_prod) || !(inquiry_rev)) {
			printk(KERN_ERR "All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

-		strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
-		strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
-		strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+		strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+		strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
+		strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);
@@ -1754,8 +1646,6 @@ out:

	se_release_vpd_for_dev(dev);

-	kfree(dev->dev_status_queue_obj);
-	kfree(dev->dev_queue_obj);
	kfree(dev);

	return NULL;
@@ -1794,7 +1684,7 @@ transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
 {
	struct se_task *task;
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
	unsigned long flags;

	task = dev->transport->alloc_task(cmd);
@@ -1807,14 +1697,14 @@ transport_generic_get_task(struct se_cmd *cmd,
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
-	task->task_no = T_TASK(cmd)->t_tasks_no++;
+	task->task_no = cmd->t_task->t_tasks_no++;
	task->task_se_cmd = cmd;
	task->se_dev = dev;
	task->task_data_direction = data_direction;

-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
-	list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	list_add_tail(&task->t_list, &cmd->t_task->t_task_list);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);

	return task;
 }
@@ -1823,7 +1713,7 @@ static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

 void transport_device_setup_cmd(struct se_cmd *cmd)
 {
-	cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
+	cmd->se_dev = cmd->se_lun->lun_se_dev;
 }
 EXPORT_SYMBOL(transport_device_setup_cmd);
@@ -1848,12 +1738,12 @@ void transport_init_se_cmd(
	 */
	cmd->t_task = &cmd->t_task_backstore;

-	INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
-	init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
-	init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
-	init_completion(&T_TASK(cmd)->t_transport_stop_comp);
-	spin_lock_init(&T_TASK(cmd)->t_state_lock);
-	atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+	INIT_LIST_HEAD(&cmd->t_task->t_task_list);
+	init_completion(&cmd->t_task->transport_lun_fe_stop_comp);
+	init_completion(&cmd->t_task->transport_lun_stop_comp);
+	init_completion(&cmd->t_task->t_transport_stop_comp);
+	spin_lock_init(&cmd->t_task->t_state_lock);
+	atomic_set(&cmd->t_task->transport_dev_active, 1);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
@@ -1870,19 +1760,19 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
-	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+	if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		DEBUG_STA("SAM Task Attribute ACA"
			" emulation is not supported\n");
-		return -1;
+		return -EINVAL;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
-	cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+	cmd->se_ordered_id = atomic_inc_return(&cmd->se_lun->lun_se_dev->dev_ordered_id);
	smp_mb__after_atomic_inc();
	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
@@ -1898,8 +1788,8 @@ void transport_free_se_cmd(
	/*
	 * Check and free any extended CDB buffer that was allocated
	 */
-	if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
-		kfree(T_TASK(se_cmd)->t_task_cdb);
+	if (se_cmd->t_task->t_task_cdb != se_cmd->t_task->__t_task_cdb)
+		kfree(se_cmd->t_task->t_task_cdb);
 }
 EXPORT_SYMBOL(transport_free_se_cmd);
@@ -1931,33 +1821,33 @@ int transport_generic_allocate_tasks(
		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
-		return -1;
+		return -EINVAL;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now..  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
-	if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
-		T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+	if (scsi_command_size(cdb) > sizeof(cmd->t_task->__t_task_cdb)) {
+		cmd->t_task->t_task_cdb = kzalloc(scsi_command_size(cdb),
				GFP_KERNEL);
-		if (!(T_TASK(cmd)->t_task_cdb)) {
-			printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
-				" %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+		if (!(cmd->t_task->t_task_cdb)) {
+			printk(KERN_ERR "Unable to allocate cmd->t_task->t_task_cdb"
+				" %u > sizeof(cmd->t_task->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
-				(unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
-			return -1;
+				(unsigned long)sizeof(cmd->t_task->__t_task_cdb));
+			return -ENOMEM;
		}
	} else
-		T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+		cmd->t_task->t_task_cdb = &cmd->t_task->__t_task_cdb[0];
	/*
-	 * Copy the original CDB into T_TASK(cmd).
+	 * Copy the original CDB into cmd->t_task.
	 */
-	memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+	memcpy(cmd->t_task->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
-	 * checks for virtual device backends.  The T_TASK(cmd)->t_task_cdb
+	 * checks for virtual device backends.  The cmd->t_task->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
@@ -1986,10 +1876,10 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks);
 int transport_generic_handle_cdb(
	struct se_cmd *cmd)
 {
-	if (!SE_LUN(cmd)) {
+	if (!cmd->se_lun) {
		dump_stack();
-		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
-		return -1;
+		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
@@ -2005,10 +1895,10 @@ EXPORT_SYMBOL(transport_generic_handle_cdb);
 int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
 {
-	if (!SE_LUN(cmd)) {
+	if (!cmd->se_lun) {
		dump_stack();
-		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
-		return -1;
+		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		return -EINVAL;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
@@ -2030,7 +1920,7 @@ int transport_generic_handle_data(
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
-		return -1;
+		return -EPERM;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
@@ -2078,14 +1968,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
	int ret = 0;

	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
-		CMD_TFO(cmd)->get_task_tag(cmd));
+		cmd->se_tfo->get_task_tag(cmd));
	/*
	 * No tasks remain in the execution queue
	 */
-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
-				&T_TASK(cmd)->t_task_list, t_list) {
+				&cmd->t_task->t_task_list, t_list) {
		DEBUG_TS("task_no[%d] - Processing task %p\n",
				task->task_no, task);
		/*
@@ -2094,14 +1984,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
		 */
		if (!atomic_read(&task->task_sent) &&
		    !atomic_read(&task->task_active)) {
-			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					task->se_dev);

			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
				task->task_no);
-			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+			spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
			continue;
		}

		/*
@@ -2111,7 +2001,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
		 */
		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
-			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+			spin_unlock_irqrestore(&cmd->t_task->t_state_lock,
					flags);

			DEBUG_TS("task_no[%d] - Waiting to complete\n",
@@ -2120,8 +2010,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
				task->task_no);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_TS("task_no[%d] - Stopped successfully\n",
				task->task_no);

-			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
-			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+			spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+			atomic_dec(&cmd->t_task->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
@@ -2132,21 +2022,11 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
			__transport_stop_task_timer(task, &flags);
	}
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);

	return ret;
 }

-static void transport_failure_reset_queue_depth(struct se_device *dev)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
-	atomic_inc(&dev->depth_left);
-	atomic_inc(&SE_HBA(dev)->left_queue_depth);
-	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
-}
-
 /*
  * Handle SAM-esque emulation for generic transport request failures.
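 *
 * In outline, the switch below maps each PYX_TRANSPORT_* status to an
 * action (a simplified sketch of the visible cases, not an exhaustive
 * list):
 *
 *	PYX_TRANSPORT_LU_COMM_FAILURE:  set a sense reason, CHECK_CONDITION
 *	reservation conflict:           return SAM status via queue_status()
 *	PYX_TRANSPORT_USE_SENSE_REASON: sense data was already set by a task
 *	default:                        treated as TCM_UNSUPPORTED_SCSI_OPCODE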
 */
@@ -2157,28 +2037,28 @@ static void transport_generic_request_failure(
	int sc)
 {
	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
-		" CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
-		T_TASK(cmd)->t_task_cdb[0]);
+		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+		cmd->t_task->t_task_cdb[0]);
	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
		" %d/%d transport_error_status: %d\n",
-		CMD_TFO(cmd)->get_cmd_state(cmd),
+		cmd->se_tfo->get_cmd_state(cmd),
		cmd->t_state, cmd->deferred_t_state,
		cmd->transport_error_status);
	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" t_transport_active: %d t_transport_stop: %d"
-		" t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
-		atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
-		atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
-		atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
-		atomic_read(&T_TASK(cmd)->t_transport_active),
-		atomic_read(&T_TASK(cmd)->t_transport_stop),
-		atomic_read(&T_TASK(cmd)->t_transport_sent));
+		" t_transport_sent: %d\n", cmd->t_task->t_task_cdbs,
+		atomic_read(&cmd->t_task->t_task_cdbs_left),
+		atomic_read(&cmd->t_task->t_task_cdbs_sent),
+		atomic_read(&cmd->t_task->t_task_cdbs_ex_left),
+		atomic_read(&cmd->t_task->t_transport_active),
+		atomic_read(&cmd->t_task->t_transport_stop),
+		atomic_read(&cmd->t_task->t_transport_sent));

	transport_stop_all_task_timers(cmd);

	if (dev)
-		transport_failure_reset_queue_depth(dev);
+		atomic_inc(&dev->depth_left);
	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
@@ -2211,8 +2091,8 @@ static void transport_generic_request_failure(
		 * we force this session to fall back to session
		 * recovery.
		 */
-		CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
-		CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+		cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
+		cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);

		goto check_stop;
	case PYX_TRANSPORT_LU_COMM_FAILURE:
@@ -2240,13 +2120,13 @@ static void transport_generic_request_failure(
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
-		if (SE_SESS(cmd) &&
-		    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
-			core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+		if (cmd->se_sess &&
+		    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+			core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

-		CMD_TFO(cmd)->queue_status(cmd);
+		cmd->se_tfo->queue_status(cmd);
		goto check_stop;
	case PYX_TRANSPORT_USE_SENSE_REASON:
		/*
		 *
		 */
		break;
	default:
		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
-			T_TASK(cmd)->t_task_cdb[0],
+			cmd->t_task->t_task_cdb[0],
			cmd->transport_error_status);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
@@ -2276,19 +2156,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd)
 {
	unsigned long flags;

-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
-	if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	if (!(atomic_read(&cmd->t_task->t_transport_timeout))) {
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
		return;
	}
-	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	if (atomic_read(&cmd->t_task->t_task_cdbs_timeout_left)) {
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
		return;
	}

-	atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
-		   &T_TASK(cmd)->t_se_count);
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	atomic_sub(atomic_read(&cmd->t_task->t_transport_timeout),
+		   &cmd->t_task->t_se_count);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
 }

 static void transport_generic_request_timeout(struct se_cmd *cmd)
@@ -2296,16 +2176,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
	unsigned long flags;

	/*
-	 * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
+	 * Reset cmd->t_task->t_se_count to allow transport_generic_remove()
	 * to make the last call to free memory resources.
	 */
-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
-	if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
-		int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
+	if (atomic_read(&cmd->t_task->t_transport_timeout) > 1) {
+		int tmp = (atomic_read(&cmd->t_task->t_transport_timeout) - 1);

-		atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
+		atomic_sub(tmp, &cmd->t_task->t_se_count);
	}
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);

	transport_generic_remove(cmd, 0, 0);
 }
@@ -2318,11 +2198,11 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
	buf = kzalloc(data_length, GFP_KERNEL);
	if (!(buf)) {
		printk(KERN_ERR "Unable to allocate memory for buffer\n");
-		return -1;
+		return -ENOMEM;
	}

-	T_TASK(cmd)->t_tasks_se_num = 0;
-	T_TASK(cmd)->t_task_buf = buf;
+	cmd->t_task->t_tasks_se_num = 0;
+	cmd->t_task->t_task_buf = buf;

	return 0;
 }
@@ -2364,9 +2244,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
 {
	unsigned long flags;

-	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
-	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags);
 }

 /*
@@ -2375,14 +2255,14 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
 static void transport_task_timeout_handler(unsigned long data)
 {
	struct se_task *task = (struct se_task *)data;
-	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_cmd *cmd = task->task_se_cmd;
	unsigned long flags;

	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);

-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
	if (task->task_flags & TF_STOP) {
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
		return;
	}
	task->task_flags &= ~TF_RUNNING;
@@ -2393,13 +2273,13 @@ static void transport_task_timeout_handler(unsigned long data)
	if (!(atomic_read(&task->task_active))) {
		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
			" == 0\n", task, cmd);
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
		return;
	}

-	atomic_inc(&T_TASK(cmd)->t_se_count);
-	atomic_inc(&T_TASK(cmd)->t_transport_timeout);
-	T_TASK(cmd)->t_tasks_failed = 1;
+	atomic_inc(&cmd->t_task->t_se_count);
+	atomic_inc(&cmd->t_task->t_transport_timeout);
+	cmd->t_task->t_tasks_failed = 1;

	atomic_set(&task->task_timeout, 1);
	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
@@ -2408,28 +2288,28 @@ static void transport_task_timeout_handler(unsigned long data)
	if (atomic_read(&task->task_stop)) {
		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
			" == 1\n", task, cmd);
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

-	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+	if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) {
		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
			" t_task_cdbs_left\n", task, cmd);
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
		return;
	}
	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
		task, cmd);

	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
 }

 /*
- * Called with T_TASK(cmd)->t_state_lock held.
+ * Called with cmd->t_task->t_state_lock held.
  */
 static void transport_start_task_timer(struct se_task *task)
 {
@@ -2441,7 +2321,7 @@ static void transport_start_task_timer(struct se_task *task)
	/*
	 * If the task_timeout is disabled, exit now.
	 */
-	timeout = DEV_ATTRIB(dev)->task_timeout;
+	timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
	if (!(timeout))
		return;
@@ -2459,21 +2339,21 @@ static void transport_start_task_timer(struct se_task *task)
 }

 /*
- * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ * Called with spin_lock_irq(&cmd->t_task->t_state_lock) held.
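 *
 * Why the lock is dropped around del_timer_sync() below (a sketch; the
 * timer handler above takes the same t_state_lock):
 *
 *	task->task_flags |= TF_STOP;		handler will bail out early
 *	spin_unlock_irqrestore(...);		let a running handler finish
 *	del_timer_sync(&task->task_timer);	would deadlock if lock held
 *	spin_lock_irqsave(...);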
  */
 void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 {
-	struct se_cmd *cmd = TASK_CMD(task);
+	struct se_cmd *cmd = task->task_se_cmd;

	if (!(task->task_flags & TF_RUNNING))
		return;

	task->task_flags |= TF_STOP;
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, *flags);

	del_timer_sync(&task->task_timer);

-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, *flags);
	task->task_flags &= ~TF_RUNNING;
	task->task_flags &= ~TF_STOP;
 }
@@ -2483,11 +2363,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd)
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;

-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
-				&T_TASK(cmd)->t_task_list, t_list)
+				&cmd->t_task->t_task_list, t_list)
		__transport_stop_task_timer(task, &flags);
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
 }

 static inline int transport_tcq_window_closed(struct se_device *dev)
@@ -2498,7 +2378,7 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
	} else
		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);

-	wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+	wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
	return 0;
 }
@@ -2511,45 +2391,45 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
  */
 static inline int transport_execute_task_attr(struct se_cmd *cmd)
 {
-	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+	if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the passed struct se_cmd list of tasks to be added to
	 * the front of the list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-		atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+		atomic_inc(&cmd->se_lun->lun_se_dev->dev_hoq_count);
		smp_mb__after_atomic_inc();
		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
-			T_TASK(cmd)->t_task_cdb[0],
+			cmd->t_task->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-		spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
+		spin_lock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock);
		list_add_tail(&cmd->se_ordered_list,
-				&SE_DEV(cmd)->ordered_cmd_list);
-		spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+				&cmd->se_lun->lun_se_dev->ordered_cmd_list);
+		spin_unlock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock);

-		atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+		atomic_inc(&cmd->se_lun->lun_se_dev->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
			" list, se_ordered_id: %u\n",
-			T_TASK(cmd)->t_task_cdb[0],
+			cmd->t_task->t_task_cdb[0],
			cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
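		 *
		 * Return-value convention used by this function (sketched
		 * from the callers below; not new behavior):
		 *
		 *	return 1;  caller adds the tasks to the execute
		 *		   queue immediately
		 *	return 0;  cmd was parked on the delayed list and is
		 *		   re-queued once older HOQ/ORDERED work drains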
		 */
-		if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+		if (!(atomic_read(&cmd->se_lun->lun_se_dev->simple_cmds)))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
-		atomic_inc(&SE_DEV(cmd)->simple_cmds);
+		atomic_inc(&cmd->se_lun->lun_se_dev->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
@@ -2557,20 +2437,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
	 * add the dormant task(s) built for the passed struct se_cmd to the
	 * execution queue and become in Active state for this struct se_device.
	 */
-	if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+	if (atomic_read(&cmd->se_lun->lun_se_dev->dev_ordered_sync) != 0) {
		/*
		 * Otherwise, add cmd w/ tasks to delayed cmd queue that
		 * will be drained upon completion of HEAD_OF_QUEUE task.
		 */
-		spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+		spin_lock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_list,
-				&SE_DEV(cmd)->delayed_cmd_list);
-		spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+				&cmd->se_lun->lun_se_dev->delayed_cmd_list);
+		spin_unlock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock);

		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
-			T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+			cmd->t_task->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
@@ -2610,7 +2490,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
	 * attribute for the tasks of the received struct se_cmd CDB
	 */
	add_tasks = transport_execute_task_attr(cmd);
-	if (add_tasks == 0)
+	if (!add_tasks)
		goto execute_tasks;
	/*
	 * This calls transport_add_tasks_from_cmd() to handle
@@ -2625,7 +2505,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
	 * storage object.
	 */
execute_tasks:
-	__transport_execute_tasks(SE_DEV(cmd));
+	__transport_execute_tasks(cmd->se_lun->lun_se_dev);
	return 0;
 }
@@ -2639,7 +2519,7 @@ static int __transport_execute_tasks(struct se_device *dev)
 {
	int error;
	struct se_cmd *cmd = NULL;
-	struct se_task *task;
+	struct se_task *task = NULL;
	unsigned long flags;

	/*
	 * Check if there is enough room in the device and HBA queue to send
	 * struct se_transport_task's to the selected transport.
	 */
check_depth:
-	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
-	if (!(atomic_read(&dev->depth_left)) ||
-	    !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
-		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+	if (!atomic_read(&dev->depth_left))
		return transport_tcq_window_closed(dev);
-	}
-	dev->dev_tcq_window_closed = 0;

-	spin_lock(&dev->execute_task_lock);
-	task = transport_get_task_from_execute_queue(dev);
-	spin_unlock(&dev->execute_task_lock);
+	dev->dev_tcq_window_closed = 0;

-	if (!task) {
-		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+	spin_lock_irq(&dev->execute_task_lock);
+	if (list_empty(&dev->execute_task_list)) {
+		spin_unlock_irq(&dev->execute_task_lock);
		return 0;
	}
+	task = list_first_entry(&dev->execute_task_list,
+				struct se_task, t_execute_list);
+	list_del(&task->t_execute_list);
+	atomic_set(&task->task_execute_queue, 0);
+	atomic_dec(&dev->execute_tasks);
+	spin_unlock_irq(&dev->execute_task_lock);

	atomic_dec(&dev->depth_left);
-	atomic_dec(&SE_HBA(dev)->left_queue_depth);
-	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);

-	cmd = TASK_CMD(task);
+	cmd = task->task_se_cmd;

-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
	atomic_set(&task->task_active, 1);
	atomic_set(&task->task_sent, 1);
-	atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+	atomic_inc(&cmd->t_task->t_task_cdbs_sent);

-	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
-	    T_TASK(cmd)->t_task_cdbs)
+	if (atomic_read(&cmd->t_task->t_task_cdbs_sent) ==
+	    cmd->t_task->t_task_cdbs)
		atomic_set(&cmd->transport_sent, 1);

	transport_start_task_timer(task);
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
	/*
	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
-	 * to grab REPORT_LUNS CDBs before they hit the
+	 * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
	 * struct se_subsystem_api->do_task() caller below.
	 */
	if (cmd->transport_emulate_cdb) {
@@ -2718,11 +2596,11 @@ check_depth:
	 * call ->do_task() directly and let the underlying TCM subsystem plugin
	 * code handle the CDB emulation.
	 */
-	if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
-	    (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+	if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+	    (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
		error = transport_emulate_control_cdb(task);
	else
-		error = TRANSPORT(dev)->do_task(task);
+		error = dev->transport->do_task(task);

	if (error != 0) {
		cmd->transport_error_status = error;
@@ -2745,12 +2623,12 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
	 * Any unsolicited data will get dumped for failed command inside of
	 * the fabric plugin
	 */
-	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+	spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags);

-	CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+	se_cmd->se_tfo->new_cmd_failure(se_cmd);
 }

 static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
@@ -2760,7 +2638,7 @@ static inline u32 transport_get_sectors_6(
	struct se_cmd *cmd,
	int *ret)
 {
-	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+	struct se_device *dev = cmd->se_lun->lun_se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
@@ -2772,7 +2650,7 @@ static inline u32 transport_get_sectors_6(
	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
-	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];

	/*
@@ -2788,7 +2666,7 @@ static inline u32 transport_get_sectors_10(
	struct se_cmd *cmd,
	int *ret)
 {
-	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+	struct se_device *dev = cmd->se_lun->lun_se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
@@ -2800,8 +2678,8 @@ static inline u32 transport_get_sectors_10(
	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
-	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
-		*ret = -1;
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -EINVAL;
		return 0;
	}

@@ -2818,7 +2696,7 @@ static inline u32 transport_get_sectors_12(
	struct se_cmd *cmd,
	int *ret)
 {
-	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+	struct se_device *dev = cmd->se_lun->lun_se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
@@ -2830,8 +2708,8 @@ static inline u32 transport_get_sectors_12(
	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
-	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
-		*ret = -1;
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+		*ret = -EINVAL;
		return 0;
	}

@@ -2848,7 +2726,7 @@ static inline u32 transport_get_sectors_16(
	struct se_cmd *cmd,
	int *ret)
 {
-	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+	struct se_device *dev = cmd->se_lun->lun_se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
@@ -2860,7 +2738,7 @@ static inline u32 transport_get_sectors_16(
	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
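	 *
	 * For example (illustrative values): cdb[12..14] = 0x01 0x00 0x00
	 * decodes as (0x01 << 16) + (0x00 << 8) + 0x00 = 65536; whether that
	 * counts blocks or bytes depends on the FIXED bit (cdb[1] & 1),
	 * which transport_get_size() checks for TYPE_TAPE.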
	 */
-	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
@@ -2890,21 +2768,21 @@ static inline u32 transport_get_size(
	unsigned char *cdb,
	struct se_cmd *cmd)
 {
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;

-	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+	if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
-			return DEV_ATTRIB(dev)->block_size * sectors;
+			return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
		} else /* bytes */
			return sectors;
	}
#if 0
	printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
-		" %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
-		DEV_ATTRIB(dev)->block_size * sectors,
-		TRANSPORT(dev)->name);
+		" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
+		dev->se_sub_dev->se_dev_attrib.block_size * sectors,
+		dev->transport->name);
#endif
-	return DEV_ATTRIB(dev)->block_size * sectors;
+	return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
 }

 unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
@@ -2958,17 +2836,17 @@ static void transport_xor_callback(struct se_cmd *cmd)
		return;
	}
	/*
-	 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+	 * Copy the scatterlist WRITE buffer located at cmd->t_task->t_mem_list
	 * into the locally allocated *buf
	 */
-	transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+	transport_memcpy_se_mem_read_contig(cmd, buf, cmd->t_task->t_mem_list);
	/*
	 * Now perform the XOR against the BIDI read memory located at
-	 * T_TASK(cmd)->t_mem_bidi_list
+	 * cmd->t_task->t_mem_bidi_list
	 */
	offset = 0;
-	list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
+	list_for_each_entry(se_mem, cmd->t_task->t_mem_bidi_list, se_list) {
		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
		if (!(addr))
			goto out;
@@ -2994,18 +2872,16 @@ static int transport_get_sense_data(struct se_cmd *cmd)
	unsigned long flags;
	u32 offset = 0;

-	if (!SE_LUN(cmd)) {
-		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
-		return -1;
-	}
-	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+	WARN_ON(!cmd->se_lun);
+
+	spin_lock_irqsave(&cmd->t_task->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
-				&T_TASK(cmd)->t_task_list, t_list) {
+				&cmd->t_task->t_task_list, t_list) {
		if (!task->task_sense)
			continue;
@@ -3014,22 +2890,22 @@ static int transport_get_sense_data(struct se_cmd *cmd)
		if (!(dev))
			continue;

-		if (!TRANSPORT(dev)->get_sense_buffer) {
-			printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+		if (!dev->transport->get_sense_buffer) {
+			printk(KERN_ERR "dev->transport->get_sense_buffer"
					" is NULL\n");
			continue;
		}

-		sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
+		sense_buffer = dev->transport->get_sense_buffer(task);
		if (!(sense_buffer)) {
			printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
				" sense buffer for task with sense\n",
-				CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+				cmd->se_tfo->get_task_tag(cmd), task->task_no);
			continue;
		}
-		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+		spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);

-		offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+		offset = cmd->se_tfo->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy((void *)&buffer[offset], (void *)sense_buffer,
@@ -3041,11 +2917,11 @@ static int transport_get_sense_data(struct se_cmd *cmd)

		printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
				" and sense\n",
-			dev->se_hba->hba_id, TRANSPORT(dev)->name,
+			dev->se_hba->hba_id, dev->transport->name,
			cmd->scsi_status);
		return 0;
	}
-	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+	spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags);

	return -1;
 }
@@ -3077,9 +2953,9 @@ transport_handle_reservation_conflict(struct se_cmd *cmd)
	 *
	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
	 */
-	if (SE_SESS(cmd) &&
-	    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
-		core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+	if (cmd->se_sess &&
+	    cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+		core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
			cmd->orig_fe_lun, 0x2C,
			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
	return -2;
@@ -3099,7 +2975,7 @@ static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
 {
-	struct se_device *dev = SE_DEV(cmd);
+	struct se_device *dev = cmd->se_lun->lun_se_dev;
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
@@ -3118,7 +2994,7 @@ static int transport_generic_cmd_sequencer(
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
-	ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
		/*
@@ -3130,7 +3006,7 @@ static int transport_generic_cmd_sequencer(
#if 0
		printk(KERN_INFO "[%s]: ALUA TG Port not available,"
			" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
-			CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+			cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
		transport_set_sense_codes(cmd, 0x04, alua_ascq);
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
@@ -3142,8 +3018,8 @@ static int transport_generic_cmd_sequencer(
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
-	if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
-		if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+	if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
+		if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0)
			return transport_handle_reservation_conflict(cmd);
		/*
@@ -3160,7 +3036,7 @@ static int transport_generic_cmd_sequencer(
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_6;
-		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+		cmd->t_task->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
@@ -3169,7 +3045,7 @@ static int transport_generic_cmd_sequencer(
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
-		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->t_task->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
@@ -3178,7 +3054,7 @@ static int transport_generic_cmd_sequencer(
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_12;
-		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->t_task->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
@@ -3187,7 +3063,7 @@ static int transport_generic_cmd_sequencer(
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_16;
-		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+		cmd->t_task->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
@@ -3196,7 +3072,7 @@ static int transport_generic_cmd_sequencer(
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_6;
-		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+		cmd->t_task->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
@@ -3205,8 +3081,8 @@ static int transport_generic_cmd_sequencer(
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
-		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
-		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task->t_task_lba = transport_lba_32(cdb);
+		cmd->t_task->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
@@ -3215,8 +3091,8 @@ static int transport_generic_cmd_sequencer(
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_12;
-		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
-		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task->t_task_lba = transport_lba_32(cdb);
+		cmd->t_task->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
@@ -3225,22 +3101,22 @@ static int transport_generic_cmd_sequencer(
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_16;
-		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
-		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task->t_task_lba = transport_lba_64(cdb);
+		cmd->t_task->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
-		    !(T_TASK(cmd)->t_tasks_bidi))
+		    !(cmd->t_task->t_tasks_bidi))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
-		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+		cmd->t_task->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
-		passthrough = (TRANSPORT(dev)->transport_type ==
+		passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);
		/*
		 * Skip the remaining assignments for TCM/PSCSI passthrough
@@ -3251,7 +3127,7 @@ static int transport_generic_cmd_sequencer(
		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
-		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+		cmd->t_task->t_tasks_fua = (cdb[1] & 0x8);
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		/*
		 * Determine if this is TCM/PSCSI device and we should disable
		 * internal emulation for this CDB.
		 */
-		passthrough = (TRANSPORT(dev)->transport_type ==
+		passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);

		switch (service_action) {
@@ -3273,7 +3149,7 @@ static int transport_generic_cmd_sequencer(
			 * XDWRITE_READ_32 logic.
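			 *
			 * Assumed field layout for this 32-byte variable
			 * length CDB (per the decode below; offsets follow
			 * SBC-3 and are illustrative):
			 *
			 *	cdb[8..9]   service action (get_unaligned_be16(&cdb[8]))
			 *	cdb[12..19] 64-bit LBA     (transport_lba_64_ext())
			 *	cdb[28..31] transfer length in sectors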
			 */
			cmd->transport_split_cdb = &split_cdb_XX_32;
-			T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+			cmd->t_task->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;

			/*
@@ -3287,14 +3163,14 @@ static int transport_generic_cmd_sequencer(
			 * transport_generic_complete_ok()
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
-			T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+			cmd->t_task->t_tasks_fua = (cdb[10] & 0x8);
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
-			T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+			cmd->t_task->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

			/*
@@ -3326,16 +3202,16 @@ static int transport_generic_cmd_sequencer(
		}
		break;
	case MAINTENANCE_IN:
-		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS) {
				cmd->transport_emulate_cdb =
-				(T10_ALUA(su_dev)->alua_type ==
+				(su_dev->t10_alua.alua_type ==
				 SPC3_ALUA_EMULATED) ?
-				&core_emulate_report_target_port_groups :
+				core_emulate_report_target_port_groups :
				NULL;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
@@ -3380,9 +3256,9 @@ static int transport_generic_cmd_sequencer(
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
		cmd->transport_emulate_cdb =
-			(T10_RES(su_dev)->res_type ==
+			(su_dev->t10_pr.res_type ==
			 SPC3_PERSISTENT_RESERVATIONS) ?
-			&core_scsi3_emulate_pr : NULL;
+			core_scsi3_emulate_pr : NULL;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
@@ -3396,16 +3272,16 @@ static int transport_generic_cmd_sequencer(
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case MAINTENANCE_OUT:
-		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->transport_emulate_cdb =
-				(T10_ALUA(su_dev)->alua_type ==
+				(su_dev->t10_alua.alua_type ==
				 SPC3_ALUA_EMULATED) ?
-				&core_emulate_set_target_port_groups :
+				core_emulate_set_target_port_groups :
				NULL;
			}

@@ -3423,7 +3299,7 @@ static int transport_generic_cmd_sequencer(
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
-		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
@@ -3500,9 +3376,9 @@ static int transport_generic_cmd_sequencer(
		 * emulation disabled.
		 */
		cmd->transport_emulate_cdb =
-				(T10_RES(su_dev)->res_type !=
+				(su_dev->t10_pr.res_type !=
				 SPC_PASSTHROUGH) ?
-				&core_scsi2_emulate_crh : NULL;
+				core_scsi2_emulate_crh : NULL;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
@@ -3517,9 +3393,9 @@ static int transport_generic_cmd_sequencer(
			size = cmd->data_length;

		cmd->transport_emulate_cdb =
-				(T10_RES(su_dev)->res_type !=
+				(su_dev->t10_pr.res_type !=
				 SPC_PASSTHROUGH) ?
-				&core_scsi2_emulate_crh : NULL;
+				core_scsi2_emulate_crh : NULL;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
@@ -3529,10 +3405,10 @@ static int transport_generic_cmd_sequencer(
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
-			T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+			cmd->t_task->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
-			T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+			cmd->t_task->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;
@@ -3543,7 +3419,7 @@ static int transport_generic_cmd_sequencer(
		/*
		 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
		 */
-		if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+		if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
			break;
		/*
		 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
@@ -3559,7 +3435,7 @@ static int transport_generic_cmd_sequencer(
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
-		passthrough = (TRANSPORT(dev)->transport_type ==
+		passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);
		/*
		 * Determine if the received UNMAP is used for direct passthrough
@@ -3578,8 +3454,8 @@ static int transport_generic_cmd_sequencer(
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
-		T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]);
-		passthrough = (TRANSPORT(dev)->transport_type ==
+		cmd->t_task->t_task_lba = get_unaligned_be16(&cdb[2]);
+		passthrough = (dev->transport->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);
		/*
		 * Determine if the received WRITE_SAME_16 is used for direct
@@ -3625,20 +3501,20 @@ static int transport_generic_cmd_sequencer(
		break;
	case REPORT_LUNS:
		cmd->transport_emulate_cdb =
-				&transport_core_report_lun_response;
+				transport_core_report_lun_response;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
-		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+		if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	default:
		printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
-			CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+			cmd->se_tfo->get_fabric_name(), cdb[0]);
		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
		goto out_unsupported_cdb;
	}
@@ -3646,7 +3522,7 @@ static int transport_generic_cmd_sequencer(
	if (size != cmd->data_length) {
		printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
-			" 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
				cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;
@@ -3660,10 +3536,10 @@ static int transport_generic_cmd_sequencer(
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
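		 *
		 * Worked example (illustrative numbers): if the fabric
		 * reported data_length = 8192 bytes but the CDB implies
		 * size = 4096, the command is an UNDERFLOW with a 4096
		 * byte residual; the reverse ordering would be an OVERFLOW.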
*/ - if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) { + if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" " CDB on non 512-byte sector setup subsystem" - " plugin: %s\n", TRANSPORT(dev)->name); + " plugin: %s\n", dev->transport->name); /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ goto out_invalid_cdb_field; } @@ -3786,7 +3662,7 @@ static void transport_memcpy_se_mem_read_contig( */ static void transport_complete_task_attr(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; struct se_cmd *cmd_p, *cmd_tmp; int new_active_tasks = 0; @@ -3846,7 +3722,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) * to do the processing of the Active tasks. */ if (new_active_tasks != 0) - wake_up_interruptible(&dev->dev_queue_obj->thread_wq); + wake_up_interruptible(&dev->dev_queue_obj.thread_wq); } static void transport_generic_complete_ok(struct se_cmd *cmd) @@ -3857,7 +3733,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task * Attribute. */ - if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); /* * Check if we need to retrieve a sense buffer from @@ -3889,8 +3765,8 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) switch (cmd->data_direction) { case DMA_FROM_DEVICE: spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.tx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); @@ -3901,34 +3777,34 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) */ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) transport_memcpy_write_contig(cmd, - T_TASK(cmd)->t_task_pt_sgl, - T_TASK(cmd)->t_task_buf); + cmd->t_task->t_task_pt_sgl, + cmd->t_task->t_task_buf); - CMD_TFO(cmd)->queue_data_in(cmd); + cmd->se_tfo->queue_data_in(cmd); break; case DMA_TO_DEVICE: spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.rx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (T_TASK(cmd)->t_mem_bidi_list != NULL) { + if (cmd->t_task->t_mem_bidi_list != NULL) { spin_lock(&cmd->se_lun->lun_sep_lock); - if (SE_LUN(cmd)->lun_sep) { - SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets += + if (cmd->se_lun->lun_sep) { + cmd->se_lun->lun_sep->sep_stats.tx_data_octets += cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); - CMD_TFO(cmd)->queue_data_in(cmd); + cmd->se_tfo->queue_data_in(cmd); break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: - CMD_TFO(cmd)->queue_status(cmd); + cmd->se_tfo->queue_status(cmd); break; default: break; @@ -3943,9 +3819,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) struct se_task *task, *task_tmp; unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &T_TASK(cmd)->t_task_list, t_list) { + &cmd->t_task->t_task_list, t_list) { if (atomic_read(&task->task_active)) continue; @@ -3954,15 +3830,15 @@ static void 
transport_free_dev_tasks(struct se_cmd *cmd) list_del(&task->t_list); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); if (task->se_dev) - TRANSPORT(task->se_dev)->free_task(task); + task->se_dev->transport->free_task(task); else printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", task->task_no); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); } static inline void transport_free_pages(struct se_cmd *cmd) @@ -3975,9 +3851,9 @@ static inline void transport_free_pages(struct se_cmd *cmd) if (cmd->se_dev->transport->do_se_mem_map) free_page = 0; - if (T_TASK(cmd)->t_task_buf) { - kfree(T_TASK(cmd)->t_task_buf); - T_TASK(cmd)->t_task_buf = NULL; + if (cmd->t_task->t_task_buf) { + kfree(cmd->t_task->t_task_buf); + cmd->t_task->t_task_buf = NULL; return; } @@ -3987,11 +3863,11 @@ static inline void transport_free_pages(struct se_cmd *cmd) if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) return; - if (!(T_TASK(cmd)->t_tasks_se_num)) + if (!(cmd->t_task->t_tasks_se_num)) return; list_for_each_entry_safe(se_mem, se_mem_tmp, - T_TASK(cmd)->t_mem_list, se_list) { + cmd->t_task->t_mem_list, se_list) { /* * We only release call __free_page(struct se_mem->se_page) when * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, @@ -4003,9 +3879,9 @@ static inline void transport_free_pages(struct se_cmd *cmd) kmem_cache_free(se_mem_cache, se_mem); } - if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) { + if (cmd->t_task->t_mem_bidi_list && cmd->t_task->t_tasks_se_bidi_num) { list_for_each_entry_safe(se_mem, se_mem_tmp, - T_TASK(cmd)->t_mem_bidi_list, se_list) { + cmd->t_task->t_mem_bidi_list, se_list) { /* * We only release call __free_page(struct se_mem->se_page) when * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, @@ -4018,11 +3894,11 @@ static inline void transport_free_pages(struct se_cmd *cmd) } } - kfree(T_TASK(cmd)->t_mem_bidi_list); - T_TASK(cmd)->t_mem_bidi_list = NULL; - kfree(T_TASK(cmd)->t_mem_list); - T_TASK(cmd)->t_mem_list = NULL; - T_TASK(cmd)->t_tasks_se_num = 0; + kfree(cmd->t_task->t_mem_bidi_list); + cmd->t_task->t_mem_bidi_list = NULL; + kfree(cmd->t_task->t_mem_list); + cmd->t_task->t_mem_list = NULL; + cmd->t_task->t_tasks_se_num = 0; } static inline void transport_release_tasks(struct se_cmd *cmd) @@ -4034,23 +3910,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + if (atomic_read(&cmd->t_task->t_fe_count)) { + if (!(atomic_dec_and_test(&cmd->t_task->t_fe_count))) { + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); return 1; } } - if (atomic_read(&T_TASK(cmd)->t_se_count)) { - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + if (atomic_read(&cmd->t_task->t_se_count)) { + if (!(atomic_dec_and_test(&cmd->t_task->t_se_count))) { + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); return 1; } } - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); return 0; 
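
The transport_dec_and_check() hunks above are mechanical T_TASK() expansions, but the function they touch is the pivot of the whole release path: a command may only be torn down once both the frontend (t_fe_count) and storage-engine (t_se_count) references have dropped. A minimal userspace model of the same dec-and-test idiom, using C11 atomics in place of the kernel's atomic_t (struct and function names are invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>

struct cmd_refs {
        atomic_int fe_count;    /* fabric/frontend references       */
        atomic_int se_count;    /* storage-engine (core) references */
};

/* Returns true when other references remain after this drop, i.e. the
 * caller must not tear the command down yet -- the same contract as
 * transport_dec_and_check() above (atomic_fetch_sub returns the value
 * *before* the decrement, so != 1 means we were not the last holder). */
static bool cmd_dec_and_check(struct cmd_refs *r)
{
        if (atomic_load(&r->fe_count) &&
            atomic_fetch_sub(&r->fe_count, 1) != 1)
                return true;
        if (atomic_load(&r->se_count) &&
            atomic_fetch_sub(&r->se_count, 1) != 1)
                return true;
        return false;   /* last reference dropped */
}
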
} @@ -4062,20 +3938,20 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) if (transport_dec_and_check(cmd)) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + if (!(atomic_read(&cmd->t_task->transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); goto free_pages; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->t_task->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); transport_release_tasks(cmd); free_pages: transport_free_pages(cmd); transport_free_se_cmd(cmd); - CMD_TFO(cmd)->release_cmd_direct(cmd); + cmd->se_tfo->release_cmd_direct(cmd); } static int transport_generic_remove( @@ -4085,27 +3961,27 @@ static int transport_generic_remove( { unsigned long flags; - if (!(T_TASK(cmd))) + if (!(cmd->t_task)) goto release_cmd; if (transport_dec_and_check(cmd)) { if (session_reinstatement) { - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); } return 1; } - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + if (!(atomic_read(&cmd->t_task->transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); goto free_pages; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->t_task->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); transport_release_tasks(cmd); free_pages: @@ -4116,7 +3992,7 @@ release_cmd: transport_release_cmd_to_pool(cmd); } else { transport_free_se_cmd(cmd); - CMD_TFO(cmd)->release_cmd_direct(cmd); + cmd->se_tfo->release_cmd_direct(cmd); } return 0; @@ -4156,8 +4032,8 @@ int transport_generic_map_mem_to_cmd( return -ENOSYS; } - T_TASK(cmd)->t_mem_list = (struct list_head *)mem; - T_TASK(cmd)->t_tasks_se_num = sg_mem_num; + cmd->t_task->t_mem_list = (struct list_head *)mem; + cmd->t_task->t_tasks_se_num = sg_mem_num; cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; return 0; } @@ -4172,36 +4048,36 @@ int transport_generic_map_mem_to_cmd( * processed into a TCM struct se_subsystem_dev, we do the mapping * from the passed physical memory to struct se_mem->se_page here. 
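
transport_release_fe_cmd() and transport_generic_remove() above show the pattern that dominates this patch: the single-level accessor macros (T_TASK(), CMD_TFO(), SE_DEV() and friends, whose removal from target_core_base.h appears at the end of this series) are expanded into plain member accesses. A compilable sketch of the before/after shape, with an abbreviated, invented stand-in for struct se_cmd:

struct se_transport_task;
struct target_core_fabric_ops;

struct se_cmd_sketch {
        struct se_transport_task *t_task;
        struct target_core_fabric_ops *se_tfo;
};

/* Before: #defines that just perform indirection (per the changelog). */
#define T_TASK(cmd)     ((cmd)->t_task)
#define CMD_TFO(cmd)    ((cmd)->se_tfo)

/* After: the member access is written out at every call site. */
static struct se_transport_task *example(struct se_cmd_sketch *cmd)
{
        return cmd->t_task;     /* was: T_TASK(cmd) */
}

The win is mostly tooling: grep, ctags and reviewers now see the real field names instead of a macro that merely hides one '->'.
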
*/ - T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_list)) + cmd->t_task->t_mem_list = transport_init_se_mem_list(); + if (!(cmd->t_task->t_mem_list)) return -ENOMEM; ret = transport_map_sg_to_mem(cmd, - T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out); + cmd->t_task->t_mem_list, mem, &se_mem_cnt_out); if (ret < 0) return -ENOMEM; - T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out; + cmd->t_task->t_tasks_se_num = se_mem_cnt_out; /* * Setup BIDI READ list of struct se_mem elements */ if ((mem_bidi_in) && (sg_mem_bidi_num)) { - T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_bidi_list)) { - kfree(T_TASK(cmd)->t_mem_list); + cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list(); + if (!(cmd->t_task->t_mem_bidi_list)) { + kfree(cmd->t_task->t_mem_list); return -ENOMEM; } se_mem_cnt_out = 0; ret = transport_map_sg_to_mem(cmd, - T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in, + cmd->t_task->t_mem_bidi_list, mem_bidi_in, &se_mem_cnt_out); if (ret < 0) { - kfree(T_TASK(cmd)->t_mem_list); + kfree(cmd->t_task->t_mem_list); return -ENOMEM; } - T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out; + cmd->t_task->t_tasks_se_bidi_num = se_mem_cnt_out; } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; @@ -4221,7 +4097,7 @@ int transport_generic_map_mem_to_cmd( * struct scatterlist format. */ cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; - T_TASK(cmd)->t_task_pt_sgl = mem; + cmd->t_task->t_task_pt_sgl = mem; } return 0; @@ -4236,21 +4112,21 @@ static inline long long transport_dev_end_lba(struct se_device *dev) static int transport_get_sectors(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; - T_TASK(cmd)->t_tasks_sectors = - (cmd->data_length / DEV_ATTRIB(dev)->block_size); - if (!(T_TASK(cmd)->t_tasks_sectors)) - T_TASK(cmd)->t_tasks_sectors = 1; + cmd->t_task->t_tasks_sectors = + (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); + if (!(cmd->t_task->t_tasks_sectors)) + cmd->t_task->t_tasks_sectors = 1; - if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK) + if (dev->transport->get_device_type(dev) != TYPE_DISK) return 0; - if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) > + if ((cmd->t_task->t_task_lba + cmd->t_task->t_tasks_sectors) > transport_dev_end_lba(dev)) { printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" " transport_dev_end_lba(): %llu\n", - T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, + cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, transport_dev_end_lba(dev)); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; @@ -4262,26 +4138,26 @@ static int transport_get_sectors(struct se_cmd *cmd) static int transport_new_cmd_obj(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; u32 task_cdbs = 0, rc; if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { task_cdbs++; - T_TASK(cmd)->t_task_cdbs++; + cmd->t_task->t_task_cdbs++; } else { int set_counts = 1; /* * Setup any BIDI READ tasks and memory from - * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks + * cmd->t_task->t_mem_bidi_list so the READ struct se_tasks * are queued first for the non pSCSI passthrough case. 
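
The transport_generic_map_mem_to_cmd() hunks above also tighten the error unwinding: once the primary t_mem_list has been allocated, a failure while setting up the BIDI list must free it before returning -ENOMEM, or the list leaks. A self-contained model of that unwind (struct mem_ctx and setup_lists() are invented for illustration):

#include <errno.h>
#include <stdlib.h>

struct mem_ctx {
        void *mem_list;         /* stands in for t_task->t_mem_list      */
        void *mem_bidi_list;    /* stands in for t_task->t_mem_bidi_list */
};

static int setup_lists(struct mem_ctx *ctx, int want_bidi)
{
        ctx->mem_list = malloc(64);
        if (!ctx->mem_list)
                return -ENOMEM;

        if (want_bidi) {
                ctx->mem_bidi_list = malloc(64);
                if (!ctx->mem_bidi_list) {
                        free(ctx->mem_list);    /* unwind, as above */
                        ctx->mem_list = NULL;
                        return -ENOMEM;
                }
        }
        return 0;
}
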
*/ - if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && - (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { + if ((cmd->t_task->t_mem_bidi_list != NULL) && + (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { rc = transport_generic_get_cdb_count(cmd, - T_TASK(cmd)->t_task_lba, - T_TASK(cmd)->t_tasks_sectors, - DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list, + cmd->t_task->t_task_lba, + cmd->t_task->t_tasks_sectors, + DMA_FROM_DEVICE, cmd->t_task->t_mem_bidi_list, set_counts); if (!(rc)) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; @@ -4292,13 +4168,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) set_counts = 0; } /* - * Setup the tasks and memory from T_TASK(cmd)->t_mem_list + * Setup the tasks and memory from cmd->t_task->t_mem_list * Note for BIDI transfers this will contain the WRITE payload */ task_cdbs = transport_generic_get_cdb_count(cmd, - T_TASK(cmd)->t_task_lba, - T_TASK(cmd)->t_tasks_sectors, - cmd->data_direction, T_TASK(cmd)->t_mem_list, + cmd->t_task->t_task_lba, + cmd->t_task->t_tasks_sectors, + cmd->data_direction, cmd->t_task->t_mem_list, set_counts); if (!(task_cdbs)) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; @@ -4306,19 +4182,19 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return PYX_TRANSPORT_LU_COMM_FAILURE; } - T_TASK(cmd)->t_task_cdbs += task_cdbs; + cmd->t_task->t_task_cdbs += task_cdbs; #if 0 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, - T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors, - T_TASK(cmd)->t_task_cdbs); + cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, + cmd->t_task->t_task_cdbs); #endif } - atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs); - atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs); - atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs); + atomic_set(&cmd->t_task->t_task_cdbs_left, task_cdbs); + atomic_set(&cmd->t_task->t_task_cdbs_ex_left, task_cdbs); + atomic_set(&cmd->t_task->t_task_cdbs_timeout_left, task_cdbs); return 0; } @@ -4342,8 +4218,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) unsigned char *buf; struct se_mem *se_mem; - T_TASK(cmd)->t_mem_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_list)) + cmd->t_task->t_mem_list = transport_init_se_mem_list(); + if (!(cmd->t_task->t_mem_list)) return -ENOMEM; /* @@ -4355,10 +4231,10 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) /* * Setup BIDI-COMMAND READ list of struct se_mem elements */ - if (T_TASK(cmd)->t_tasks_bidi) { - T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(T_TASK(cmd)->t_mem_bidi_list)) { - kfree(T_TASK(cmd)->t_mem_list); + if (cmd->t_task->t_tasks_bidi) { + cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list(); + if (!(cmd->t_task->t_mem_bidi_list)) { + kfree(cmd->t_task->t_mem_list); return -ENOMEM; } } @@ -4387,8 +4263,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) memset(buf, 0, se_mem->se_len); kunmap_atomic(buf, KM_IRQ0); - list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list); - T_TASK(cmd)->t_tasks_se_num++; + list_add_tail(&se_mem->se_list, cmd->t_task->t_mem_list); + cmd->t_task->t_tasks_se_num++; DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" " Offset(%u)\n", se_mem->se_page, se_mem->se_len, @@ -4398,25 +4274,25 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) } DEBUG_MEM("Allocated total struct 
se_mem elements(%u)\n", - T_TASK(cmd)->t_tasks_se_num); + cmd->t_task->t_tasks_se_num); return 0; out: if (se_mem) __free_pages(se_mem->se_page, 0); kmem_cache_free(se_mem_cache, se_mem); - return -1; + return -ENOMEM; } -u32 transport_calc_sg_num( +int transport_init_task_sg( struct se_task *task, struct se_mem *in_se_mem, u32 task_offset) { struct se_cmd *se_cmd = task->task_se_cmd; - struct se_device *se_dev = SE_DEV(se_cmd); + struct se_device *se_dev = se_cmd->se_lun->lun_se_dev; struct se_mem *se_mem = in_se_mem; - struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd); + struct target_core_fabric_ops *tfo = se_cmd->se_tfo; u32 sg_length, task_size = task->task_size, task_sg_num_padded; while (task_size != 0) { @@ -4430,7 +4306,7 @@ u32 transport_calc_sg_num( sg_length = se_mem->se_len; if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) + se_cmd->t_task->t_mem_list))) se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); } else { @@ -4450,7 +4326,7 @@ u32 transport_calc_sg_num( sg_length = (se_mem->se_len - task_offset); if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) + se_cmd->t_task->t_mem_list))) se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); } @@ -4484,21 +4360,23 @@ next: if (!(task->task_sg)) { printk(KERN_ERR "Unable to allocate memory for" " task->task_sg\n"); - return 0; + return -ENOMEM; } sg_init_table(&task->task_sg[0], task_sg_num_padded); /* * Setup task->task_sg_bidi for SCSI READ payload for * TCM/pSCSI passthrough if present for BIDI-COMMAND */ - if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) && - (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { + if ((se_cmd->t_task->t_mem_bidi_list != NULL) && + (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { task->task_sg_bidi = kzalloc(task_sg_num_padded * sizeof(struct scatterlist), GFP_KERNEL); if (!(task->task_sg_bidi)) { + kfree(task->task_sg); + task->task_sg = NULL; printk(KERN_ERR "Unable to allocate memory for" " task->task_sg_bidi\n"); - return 0; + return -ENOMEM; } sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); } @@ -4535,13 +4413,13 @@ static inline int transport_set_tasks_sectors_disk( if ((lba + sectors) > transport_dev_end_lba(dev)) { task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); - if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; + if (task->task_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { + task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; *max_sectors_set = 1; } } else { - if (sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; + if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { + task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; *max_sectors_set = 1; } else task->task_sectors = sectors; @@ -4557,8 +4435,8 @@ static inline int transport_set_tasks_sectors_non_disk( u32 sectors, int *max_sectors_set) { - if (sectors > DEV_ATTRIB(dev)->max_sectors) { - task->task_sectors = DEV_ATTRIB(dev)->max_sectors; + if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { + task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; *max_sectors_set = 1; } else task->task_sectors = sectors; @@ -4573,7 +4451,7 @@ static inline int transport_set_tasks_sectors( u32 sectors, int *max_sectors_set) { - return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ? + return (dev->transport->get_device_type(dev) == TYPE_DISK) ? 
transport_set_tasks_sectors_disk(task, dev, lba, sectors, max_sectors_set) : transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, @@ -4590,17 +4468,15 @@ static int transport_map_sg_to_mem( struct scatterlist *sg; u32 sg_count = 1, cmd_size = cmd->data_length; - if (!in_mem) { - printk(KERN_ERR "No source scatterlist\n"); - return -1; - } + WARN_ON(!in_mem); + sg = (struct scatterlist *)in_mem; while (cmd_size) { se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); if (!(se_mem)) { printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&se_mem->se_list); DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" @@ -4658,7 +4534,7 @@ int transport_map_mem_to_sg( if (!sg) { printk(KERN_ERR "Unable to locate valid struct" " scatterlist pointer\n"); - return -1; + return -EINVAL; } while (task_size != 0) { @@ -4675,7 +4551,7 @@ int transport_map_mem_to_sg( sg->length = se_mem->se_len; if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) { + se_cmd->t_task->t_mem_list))) { se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); (*se_mem_cnt)++; @@ -4711,7 +4587,7 @@ int transport_map_mem_to_sg( sg->length = (se_mem->se_len - *task_offset); if (!(list_is_last(&se_mem->se_list, - T_TASK(se_cmd)->t_mem_list))) { + se_cmd->t_task->t_mem_list))) { se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); (*se_mem_cnt)++; @@ -4755,7 +4631,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; struct se_task *task; - struct target_core_fabric_ops *tfo = CMD_TFO(cmd); + struct target_core_fabric_ops *tfo = cmd->se_tfo; u32 task_sg_num = 0, sg_count = 0; int i; @@ -4769,7 +4645,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) * Walk the struct se_task list and setup scatterlist chains * for each contiguosly allocated struct se_task->task_sg[]. */ - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { if (!(task->task_sg) || !(task->task_padded_sg)) continue; @@ -4780,10 +4656,10 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) * Either add chain or mark end of scatterlist */ if (!(list_is_last(&task->t_list, - &T_TASK(cmd)->t_task_list))) { + &cmd->t_task->t_task_list))) { /* * Clear existing SGL termination bit set in - * transport_calc_sg_num(), see sg_mark_end() + * transport_init_task_sg(), see sg_mark_end() */ sg_end_cur = &task->task_sg[task->task_sg_num - 1]; sg_end_cur->page_link &= ~0x02; @@ -4806,10 +4682,10 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) /* * Check for single task.. */ - if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) { + if (!(list_is_last(&task->t_list, &cmd->t_task->t_task_list))) { /* * Clear existing SGL termination bit set in - * transport_calc_sg_num(), see sg_mark_end() + * transport_init_task_sg(), see sg_mark_end() */ sg_end = &task->task_sg[task->task_sg_num - 1]; sg_end->page_link &= ~0x02; @@ -4824,15 +4700,15 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) * Setup the starting pointer and total t_tasks_sg_linked_no including * padding SGs for linking and to mark the end. 
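
The '+ WARN_ON(!in_mem);' hunk in transport_map_sg_to_mem() above is one of the "Make two runtime checks into WARN_ONs" items from the changelog: a NULL scatterlist here is impossible by contract, so instead of failing the I/O the condition is merely diagnosed loudly. A rough userspace approximation of WARN_ON() (using a GNU statement expression, as the kernel's own definition does; this is a sketch, not the kernel macro):

#include <stdio.h>

#define WARN_ON(cond) ({                                        \
        int __ret_warn_on = !!(cond);                           \
        if (__ret_warn_on)                                      \
                fprintf(stderr, "WARNING: %s:%d %s\n",          \
                        __FILE__, __LINE__, #cond);             \
        __ret_warn_on;                                          \
})

int main(void)
{
        void *in_mem = "sgl";
        WARN_ON(!in_mem);       /* impossible by contract; diagnosed, not fatal */
        return 0;
}
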
*/ - T_TASK(cmd)->t_tasks_sg_chained = sg_first; - T_TASK(cmd)->t_tasks_sg_chained_no = sg_count; + cmd->t_task->t_tasks_sg_chained = sg_first; + cmd->t_task->t_tasks_sg_chained_no = sg_count; - DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and" - " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained, - T_TASK(cmd)->t_tasks_sg_chained_no); + DEBUG_CMD_M("Setup cmd: %p cmd->t_task->t_tasks_sg_chained: %p and" + " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task->t_tasks_sg_chained, + cmd->t_task->t_tasks_sg_chained_no); - for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg, - T_TASK(cmd)->t_tasks_sg_chained_no, i) { + for_each_sg(cmd->t_task->t_tasks_sg_chained, sg, + cmd->t_task->t_tasks_sg_chained_no, i) { DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); @@ -4860,12 +4736,12 @@ static int transport_do_se_mem_map( * se_subsystem_api_t->do_se_mem_map is used when internal allocation * has been done by the transport plugin. */ - if (TRANSPORT(dev)->do_se_mem_map) { - ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list, + if (dev->transport->do_se_mem_map) { + ret = dev->transport->do_se_mem_map(task, se_mem_list, in_mem, in_se_mem, out_se_mem, se_mem_cnt, task_offset_in); if (ret == 0) - T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; return ret; } @@ -4875,7 +4751,7 @@ static int transport_do_se_mem_map( * This is the normal path for all normal non BIDI and BIDI-COMMAND * WRITE payloads.. If we need to do BIDI READ passthrough for * TCM/pSCSI the first call to transport_do_se_mem_map -> - * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the + * transport_init_task_sg() -> transport_map_mem_to_sg() will do the * allocation for task->task_sg_bidi, and the subsequent call to * transport_do_se_mem_map() from transport_generic_get_cdb_count() */ @@ -4884,8 +4760,9 @@ static int transport_do_se_mem_map( * Assume default that transport plugin speaks preallocated * scatterlists. */ - if (!(transport_calc_sg_num(task, in_se_mem, task_offset))) - return -1; + ret = transport_init_task_sg(task, in_se_mem, task_offset); + if (ret <= 0) + return ret; /* * struct se_task->task_sg now contains the struct scatterlist array. 
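
The 'ret = transport_init_task_sg(...); if (ret <= 0) return ret;' hunk above completes the rename from transport_calc_sg_num(): the helper no longer returns a u32 count with 0 doubling as the failure value, it returns a negative errno on failure. A small sketch of the two contracts side by side (both helpers here are hypothetical stand-ins, not the TCM functions):

#include <errno.h>

/* Old contract: sg entry count, with 0 doubling as "failed". */
static unsigned int calc_sg_num_old(int fail)
{
        return fail ? 0 : 4;
}

/* New contract: sg entry count, or a negative errno on failure. */
static int init_task_sg_new(int fail)
{
        return fail ? -ENOMEM : 4;
}

static int caller(void)
{
        unsigned int n = calc_sg_num_old(0);
        if (!n)                 /* old style: but *which* failure?  */
                return -ENOMEM;

        int ret = init_task_sg_new(0);
        if (ret <= 0)           /* new style, as in the hunk above  */
                return ret;
        return 0;               /* ret sg entries were set up       */
}
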
*/ @@ -4914,7 +4791,7 @@ static u32 transport_generic_get_cdb_count( struct se_task *task; struct se_mem *se_mem = NULL, *se_mem_lout = NULL; struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; int max_sectors_set = 0, ret; u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; @@ -4933,15 +4810,15 @@ static u32 transport_generic_get_cdb_count( * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation */ - if ((T_TASK(cmd)->t_mem_bidi_list != NULL) && - !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) && - (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) - se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next, + if ((cmd->t_task->t_mem_bidi_list != NULL) && + !(list_empty(cmd->t_task->t_mem_bidi_list)) && + (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) + se_mem_bidi = list_entry(cmd->t_task->t_mem_bidi_list->next, struct se_mem, se_list); while (sectors) { DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", - CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors, + cmd->se_tfo->get_task_tag(cmd), lba, sectors, transport_dev_end_lba(dev)); task = transport_generic_get_task(cmd, data_direction); @@ -4955,19 +4832,19 @@ static u32 transport_generic_get_cdb_count( lba += task->task_sectors; sectors -= task->task_sectors; task->task_size = (task->task_sectors * - DEV_ATTRIB(dev)->block_size); + dev->se_sub_dev->se_dev_attrib.block_size); - cdb = TRANSPORT(dev)->get_cdb(task); + cdb = dev->transport->get_cdb(task); if ((cdb)) { - memcpy(cdb, T_TASK(cmd)->t_task_cdb, - scsi_command_size(T_TASK(cmd)->t_task_cdb)); + memcpy(cdb, cmd->t_task->t_task_cdb, + scsi_command_size(cmd->t_task->t_task_cdb)); cmd->transport_split_cdb(task->task_lba, &task->task_sectors, cdb); } /* * Perform the SE OBJ plugin and/or Transport plugin specific - * mapping for T_TASK(cmd)->t_mem_list. And setup the + * mapping for cmd->t_task->t_mem_list. And setup the * task->task_sg and if necessary task->task_sg_bidi */ ret = transport_do_se_mem_map(dev, task, mem_list, @@ -4978,17 +4855,17 @@ static u32 transport_generic_get_cdb_count( se_mem = se_mem_lout; /* - * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi + * Setup the cmd->t_task->t_mem_bidi_list -> task->task_sg_bidi * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI * * Note that the first call to transport_do_se_mem_map() above will * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() - * -> transport_calc_sg_num(), and the second here will do the + * -> transport_init_task_sg(), and the second here will do the * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. */ if (task->task_sg_bidi != NULL) { ret = transport_do_se_mem_map(dev, task, - T_TASK(cmd)->t_mem_bidi_list, NULL, + cmd->t_task->t_mem_bidi_list, NULL, se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, &task_offset_in); if (ret < 0) @@ -5011,12 +4888,12 @@ static u32 transport_generic_get_cdb_count( } if (set_counts) { - atomic_inc(&T_TASK(cmd)->t_fe_count); - atomic_inc(&T_TASK(cmd)->t_se_count); + atomic_inc(&cmd->t_task->t_fe_count); + atomic_inc(&cmd->t_task->t_se_count); } DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", - CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) + cmd->se_tfo->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) ? 
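
transport_generic_get_cdb_count() above is the heart of task splitting: a single SCSI command covering N sectors is chopped into struct se_task units of at most max_sectors each, and every task receives its own copy of the CDB with LBA and transfer length rewritten via transport_split_cdb(). A standalone sketch of that loop shape (values and printf output are illustrative only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t lba = 1000;
        uint32_t sectors = 300, max_sectors = 128;
        unsigned int task_cdbs = 0;

        while (sectors) {
                uint32_t this_task = sectors > max_sectors ? max_sectors
                                                           : sectors;
                /* the real loop copies t_task_cdb here and calls
                 * transport_split_cdb(task_lba, &task_sectors, cdb) */
                printf("task %u: lba=%llu sectors=%u\n", task_cdbs,
                       (unsigned long long)lba, this_task);
                lba += this_task;
                sectors -= this_task;
                task_cdbs++;
        }
        return 0;
}
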
"DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); return task_cdbs; @@ -5027,7 +4904,7 @@ out: static int transport_map_control_cmd_to_task(struct se_cmd *cmd) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; unsigned char *cdb; struct se_task *task; int ret; @@ -5036,7 +4913,7 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) if (!task) return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; - cdb = TRANSPORT(dev)->get_cdb(task); + cdb = dev->transport->get_cdb(task); if (cdb) memcpy(cdb, cmd->t_task->t_task_cdb, scsi_command_size(cmd->t_task->t_task_cdb)); @@ -5052,8 +4929,8 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) struct se_mem *se_mem = NULL, *se_mem_lout = NULL; u32 se_mem_cnt = 0, task_offset = 0; - if (!list_empty(T_TASK(cmd)->t_mem_list)) - se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, + if (!list_empty(cmd->t_task->t_mem_list)) + se_mem = list_entry(cmd->t_task->t_mem_list->next, struct se_mem, se_list); ret = transport_do_se_mem_map(dev, task, @@ -5092,14 +4969,14 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) { struct se_portal_group *se_tpg; struct se_task *task; - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; int ret = 0; /* * Determine is the TCM fabric module has already allocated physical * memory, and is directly calling transport_generic_map_mem_to_cmd() * to setup beforehand the linked list of physical memory at - * T_TASK(cmd)->t_mem_list of struct se_mem->se_page + * cmd->t_task->t_mem_list of struct se_mem->se_page */ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { ret = transport_allocate_resources(cmd); @@ -5120,15 +4997,15 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) * Linux/NET via kernel sockets and needs to allocate a * struct iovec array to complete the struct se_cmd */ - se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg; - if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) { - ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd); + se_tpg = cmd->se_lun->lun_sep->sep_tpg; + if (se_tpg->se_tpg_tfo->alloc_cmd_iovecs != NULL) { + ret = se_tpg->se_tpg_tfo->alloc_cmd_iovecs(cmd); if (ret < 0) return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; } if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { if (atomic_read(&task->task_sent)) continue; if (!dev->transport->map_task_SG) @@ -5175,9 +5052,9 @@ void transport_generic_process_write(struct se_cmd *cmd) * original EDTL */ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { - if (!T_TASK(cmd)->t_tasks_se_num) { + if (!cmd->t_task->t_tasks_se_num) { unsigned char *dst, *buf = - (unsigned char *)T_TASK(cmd)->t_task_buf; + (unsigned char *)cmd->t_task->t_task_buf; dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL); if (!(dst)) { @@ -5189,15 +5066,15 @@ void transport_generic_process_write(struct se_cmd *cmd) } memcpy(dst, buf, cmd->cmd_spdtl); - kfree(T_TASK(cmd)->t_task_buf); - T_TASK(cmd)->t_task_buf = dst; + kfree(cmd->t_task->t_task_buf); + cmd->t_task->t_task_buf = dst; } else { struct scatterlist *sg = - (struct scatterlist *sg)T_TASK(cmd)->t_task_buf; + (struct scatterlist *sg)cmd->t_task->t_task_buf; struct scatterlist *orig_sg; orig_sg = kzalloc(sizeof(struct scatterlist) * - T_TASK(cmd)->t_tasks_se_num, + cmd->t_task->t_tasks_se_num, GFP_KERNEL))) { if (!(orig_sg)) { printk(KERN_ERR "Unable to allocate memory" @@ -5207,9 +5084,9 @@ void transport_generic_process_write(struct se_cmd 
*cmd) return; } - memcpy(orig_sg, T_TASK(cmd)->t_task_buf, + memcpy(orig_sg, cmd->t_task->t_task_buf, sizeof(struct scatterlist) * - T_TASK(cmd)->t_tasks_se_num); + cmd->t_task->t_tasks_se_num); cmd->data_length = cmd->cmd_spdtl; /* @@ -5240,24 +5117,24 @@ static int transport_generic_write_pending(struct se_cmd *cmd) unsigned long flags; int ret; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); cmd->t_state = TRANSPORT_WRITE_PENDING; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); /* * For the TCM control CDBs using a contiguous buffer, do the memcpy * from the passed Linux/SCSI struct scatterlist located at - * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at - * T_TASK(se_cmd)->t_task_buf. + * se_cmd->t_task->t_task_pt_buf to the contiguous buffer at + * se_cmd->t_task->t_task_buf. */ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) transport_memcpy_read_contig(cmd, - T_TASK(cmd)->t_task_buf, - T_TASK(cmd)->t_task_pt_sgl); + cmd->t_task->t_task_buf, + cmd->t_task->t_task_pt_sgl); /* * Clear the se_cmd for WRITE_PENDING status in order to set - * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data + * cmd->t_task->t_transport_active=0 so that transport_generic_handle_data * can be called from HW target mode interrupt code. This is safe - * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending + * to be called with transport_off=1 before the cmd->se_tfo->write_pending * because the se_cmd->se_lun pointer is not being cleared. */ transport_cmd_check_stop(cmd, 1, 0); @@ -5266,7 +5143,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd) * Call the fabric write_pending function here to let the * frontend know that WRITE buffers are ready. */ - ret = CMD_TFO(cmd)->write_pending(cmd); + ret = cmd->se_tfo->write_pending(cmd); if (ret < 0) return ret; @@ -5279,11 +5156,11 @@ static int transport_generic_write_pending(struct se_cmd *cmd) */ void transport_release_cmd_to_pool(struct se_cmd *cmd) { - BUG_ON(!T_TASK(cmd)); - BUG_ON(!CMD_TFO(cmd)); + BUG_ON(!cmd->t_task); + BUG_ON(!cmd->se_tfo); transport_free_se_cmd(cmd); - CMD_TFO(cmd)->release_cmd_to_pool(cmd); + cmd->se_tfo->release_cmd_to_pool(cmd); } EXPORT_SYMBOL(transport_release_cmd_to_pool); @@ -5297,16 +5174,16 @@ void transport_generic_free_cmd( int release_to_pool, int session_reinstatement) { - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd)) + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !cmd->t_task) transport_release_cmd_to_pool(cmd); else { core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); - if (SE_LUN(cmd)) { + if (cmd->se_lun) { #if 0 printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" - " SE_LUN(cmd)\n", cmd, - CMD_TFO(cmd)->get_task_tag(cmd)); + " cmd->se_lun\n", cmd, + cmd->se_tfo->get_task_tag(cmd)); #endif transport_lun_remove_cmd(cmd); } @@ -5343,32 +5220,32 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) * If the frontend has already requested this struct se_cmd to * be stopped, we can safely ignore this struct se_cmd. 
*/ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - if (atomic_read(&T_TASK(cmd)->t_transport_stop)) { - atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + if (atomic_read(&cmd->t_task->t_transport_stop)) { + atomic_set(&cmd->t_task->transport_lun_stop, 0); DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" - " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); transport_cmd_check_stop(cmd, 1, 0); - return -1; + return -EPERM; } - atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + atomic_set(&cmd->t_task->transport_lun_fe_stop, 1); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); - wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); + wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq); ret = transport_stop_tasks_for_cmd(cmd); DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" - " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret); + " %d\n", cmd, cmd->t_task->t_task_cdbs, ret); if (!ret) { DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", - CMD_TFO(cmd)->get_task_tag(cmd)); - wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp); + cmd->se_tfo->get_task_tag(cmd)); + wait_for_completion(&cmd->t_task->transport_lun_stop_comp); DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); } - transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); return 0; } @@ -5394,33 +5271,33 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) struct se_cmd, se_lun_list); list_del(&cmd->se_lun_list); - if (!(T_TASK(cmd))) { - printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL" + if (!(cmd->t_task)) { + printk(KERN_ERR "ITT: 0x%08x, cmd->t_task = NULL" "[i,t]_state: %u/%u\n", - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); BUG(); } - atomic_set(&T_TASK(cmd)->transport_lun_active, 0); + atomic_set(&cmd->t_task->transport_lun_active, 0); /* * This will notify iscsi_target_transport.c: * transport_cmd_check_stop() that a LUN shutdown is in * progress for the iscsi_cmd_t. 
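
transport_lun_wait_for_tasks() and __transport_clear_lun_from_sessions() above implement a stop-flag-plus-completion handshake: the shutdown side raises transport_lun_stop and sleeps on a completion that the I/O side fires once it has quiesced. A userspace model of the same handshake using pthreads and C11 atomics (all names invented; the kernel uses struct completion, not a condvar):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool lun_stop;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t stopped = PTHREAD_COND_INITIALIZER;
static int done;

/* I/O-path side: on seeing the stop flag, fire the "completion". */
static void io_side_quiesced(void)
{
        if (atomic_load(&lun_stop)) {
                pthread_mutex_lock(&lock);
                done = 1;                       /* complete(&...stop_comp) */
                pthread_cond_signal(&stopped);
                pthread_mutex_unlock(&lock);
        }
}

/* Shutdown side: raise the flag, then wait for the I/O path. */
static void shutdown_side_wait(void)
{
        atomic_store(&lun_stop, true);          /* atomic_set(&..._stop, 1) */
        pthread_mutex_lock(&lock);
        while (!done)                           /* wait_for_completion(...) */
                pthread_cond_wait(&stopped, &lock);
        pthread_mutex_unlock(&lock);
}
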
*/ - spin_lock(&T_TASK(cmd)->t_state_lock); - DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport" + spin_lock(&cmd->t_task->t_state_lock); + DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task->transport" "_lun_stop for ITT: 0x%08x\n", - SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); - atomic_set(&T_TASK(cmd)->transport_lun_stop, 1); - spin_unlock(&T_TASK(cmd)->t_state_lock); + cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); + atomic_set(&cmd->t_task->transport_lun_stop, 1); + spin_unlock(&cmd->t_task->t_state_lock); spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - if (!(SE_LUN(cmd))) { + if (!(cmd->se_lun)) { printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state); + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); BUG(); } /* @@ -5428,27 +5305,27 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) * and/or stop its context. */ DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" - "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); + "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); - if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) { + if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) { spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" "_wait_for_tasks(): SUCCESS\n", - SE_LUN(cmd)->unpacked_lun, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_lun->unpacked_lun, + cmd->se_tfo->get_task_tag(cmd)); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); - if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags); + if (!(atomic_read(&cmd->t_task->transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); goto check_cond; } - atomic_set(&T_TASK(cmd)->transport_dev_active, 0); + atomic_set(&cmd->t_task->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); transport_free_dev_tasks(cmd); /* @@ -5465,24 +5342,24 @@ check_cond: * be released, notify the waiting thread now that LU has * finished accessing it. 
*/ - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags); - if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) { + spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags); + if (atomic_read(&cmd->t_task->transport_lun_fe_stop)) { DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" " struct se_cmd: %p ITT: 0x%08x\n", lun->unpacked_lun, - cmd, CMD_TFO(cmd)->get_task_tag(cmd)); + cmd, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); transport_cmd_check_stop(cmd, 1, 0); - complete(&T_TASK(cmd)->transport_lun_fe_stop_comp); + complete(&cmd->t_task->transport_lun_fe_stop_comp); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", - lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd)); + lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); } spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); @@ -5506,7 +5383,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) "tcm_cl_%u", lun->unpacked_lun); if (IS_ERR(kt)) { printk(KERN_ERR "Unable to start clear_lun thread\n"); - return -1; + return PTR_ERR(kt); } wait_for_completion(&lun->lun_shutdown_comp); @@ -5528,20 +5405,20 @@ static void transport_generic_wait_for_tasks( if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) return; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); /* * If we are already stopped due to an external event (ie: LUN shutdown) * sleep until the connection can have the passed struct se_cmd back. - * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by + * The cmd->t_task->transport_lun_stopped_sem will be upped by * transport_clear_lun_from_sessions() once the ConfigFS context caller * has completed its operation on the struct se_cmd. */ - if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) { + if (atomic_read(&cmd->t_task->transport_lun_stop)) { DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" - " wait_for_completion(&T_TASK(cmd)transport_lun_fe" + " wait_for_completion(&cmd->t_tasktransport_lun_fe" "_stop_comp); for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); /* * There is a special case for WRITES where a FE exception + * LUN shutdown means ConfigFS context is still sleeping on @@ -5549,10 +5426,10 @@ static void transport_generic_wait_for_tasks( * We go ahead and up transport_lun_stop_comp just to be sure * here. */ - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); - complete(&T_TASK(cmd)->transport_lun_stop_comp); - wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + complete(&cmd->t_task->transport_lun_stop_comp); + wait_for_completion(&cmd->t_task->transport_lun_fe_stop_comp); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); /* @@ -5561,39 +5438,39 @@ static void transport_generic_wait_for_tasks( * normal means below. 
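
The 'return PTR_ERR(kt);' hunk in transport_clear_lun_from_sessions() above is another "-1 to errno" conversion, this time via the kernel's ERR_PTR encoding: kthread_run() returns either a valid pointer or a negative errno folded into the top page of the address space. A userspace model of that encoding (MAX_ERRNO and the helpers mirror the kernel's idea, but this is a sketch, not the real definitions):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *kt = ERR_PTR(-ENOMEM);    /* e.g. a failed kthread_run()   */

        if (IS_ERR(kt))                 /* replaces the old NULL/-1 test */
                printf("propagating errno %ld\n", PTR_ERR(kt));
        return 0;
}
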
*/ DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" - " wait_for_completion(&T_TASK(cmd)transport_lun_fe_" + " wait_for_completion(&cmd->t_tasktransport_lun_fe_" "stop_comp); for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); - atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); + atomic_set(&cmd->t_task->transport_lun_stop, 0); } - if (!atomic_read(&T_TASK(cmd)->t_transport_active) || - atomic_read(&T_TASK(cmd)->t_transport_aborted)) + if (!atomic_read(&cmd->t_task->t_transport_active) || + atomic_read(&cmd->t_task->t_transport_aborted)) goto remove; - atomic_set(&T_TASK(cmd)->t_transport_stop, 1); + atomic_set(&cmd->t_task->t_transport_stop, 1); DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" - " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state, + " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state); - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); - wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq); + wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq); - wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp); + wait_for_completion(&cmd->t_task->t_transport_stop_comp); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_set(&T_TASK(cmd)->t_transport_active, 0); - atomic_set(&T_TASK(cmd)->t_transport_stop, 0); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + atomic_set(&cmd->t_task->t_transport_active, 0); + atomic_set(&cmd->t_task->t_transport_stop, 0); DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" - "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n", - CMD_TFO(cmd)->get_task_tag(cmd)); + "&cmd->t_task->t_transport_stop_comp) for ITT: 0x%08x\n", + cmd->se_tfo->get_task_tag(cmd)); remove: - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); if (!remove_cmd) return; @@ -5632,13 +5509,13 @@ int transport_send_check_condition_and_sense( int offset; u8 asc = 0, ascq = 0; - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); return 0; } cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; - spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); if (!reason && from_transport) goto after_reason; @@ -5651,7 +5528,7 @@ int transport_send_check_condition_and_sense( * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE * from include/scsi/scsi_cmnd.h */ - offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd, + offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); /* * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses @@ -5788,7 +5665,7 @@ int transport_send_check_condition_and_sense( cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; after_reason: - CMD_TFO(cmd)->queue_status(cmd); + cmd->se_tfo->queue_status(cmd); return 0; } EXPORT_SYMBOL(transport_send_check_condition_and_sense); @@ -5797,18 +5674,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) { int ret = 0; - if 
(atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) { + if (atomic_read(&cmd->t_task->t_transport_aborted) != 0) { if (!(send_status) || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) return 1; #if 0 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" " status for CDB: 0x%02x ITT: 0x%08x\n", - T_TASK(cmd)->t_task_cdb[0], - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->t_task->t_task_cdb[0], + cmd->se_tfo->get_task_tag(cmd)); #endif cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; - CMD_TFO(cmd)->queue_status(cmd); + cmd->se_tfo->queue_status(cmd); ret = 1; } return ret; @@ -5824,8 +5701,8 @@ void transport_send_task_abort(struct se_cmd *cmd) * queued back to fabric module by transport_check_aborted_status(). */ if (cmd->data_direction == DMA_TO_DEVICE) { - if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) { - atomic_inc(&T_TASK(cmd)->t_transport_aborted); + if (cmd->se_tfo->write_pending_status(cmd) != 0) { + atomic_inc(&cmd->t_task->t_transport_aborted); smp_mb__after_atomic_inc(); cmd->scsi_status = SAM_STAT_TASK_ABORTED; transport_new_cmd_failure(cmd); @@ -5835,10 +5712,10 @@ void transport_send_task_abort(struct se_cmd *cmd) cmd->scsi_status = SAM_STAT_TASK_ABORTED; #if 0 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," - " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0], - CMD_TFO(cmd)->get_task_tag(cmd)); + " ITT: 0x%08x\n", cmd->t_task->t_task_cdb[0], + cmd->se_tfo->get_task_tag(cmd)); #endif - CMD_TFO(cmd)->queue_status(cmd); + cmd->se_tfo->queue_status(cmd); } /* transport_generic_do_tmr(): @@ -5848,7 +5725,7 @@ void transport_send_task_abort(struct se_cmd *cmd) int transport_generic_do_tmr(struct se_cmd *cmd) { struct se_cmd *ref_cmd; - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; struct se_tmr_req *tmr = cmd->se_tmr_req; int ret; @@ -5881,7 +5758,7 @@ int transport_generic_do_tmr(struct se_cmd *cmd) } cmd->t_state = TRANSPORT_ISTATE_PROCESSING; - CMD_TFO(cmd)->queue_tm_rsp(cmd); + cmd->se_tfo->queue_tm_rsp(cmd); transport_cmd_check_stop(cmd, 2, 0); return 0; @@ -5920,44 +5797,44 @@ static void transport_processing_shutdown(struct se_device *dev) */ spin_lock_irqsave(&dev->execute_task_lock, flags); while ((task = transport_get_task_from_state_list(dev))) { - if (!(TASK_CMD(task))) { - printk(KERN_ERR "TASK_CMD(task) is NULL!\n"); + if (!task->task_se_cmd) { + printk(KERN_ERR "task->task_se_cmd is NULL!\n"); continue; } - cmd = TASK_CMD(task); + cmd = task->task_se_cmd; - if (!T_TASK(cmd)) { - printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:" + if (!cmd->t_task) { + printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:" " %p ITT: 0x%08x\n", task, cmd, - CMD_TFO(cmd)->get_task_tag(cmd)); + cmd->se_tfo->get_task_tag(cmd)); continue; } spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," " i_state/def_i_state: %d/%d, t_state/def_t_state:" " %d/%d cdb: 0x%02x\n", cmd, task, - CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn, - CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state, + cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, + cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, cmd->t_state, cmd->deferred_t_state, - T_TASK(cmd)->t_task_cdb[0]); + cmd->t_task->t_task_cdb[0]); DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" " %d t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", - 
CMD_TFO(cmd)->get_task_tag(cmd), - T_TASK(cmd)->t_task_cdbs, - atomic_read(&T_TASK(cmd)->t_task_cdbs_left), - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent), - atomic_read(&T_TASK(cmd)->t_transport_active), - atomic_read(&T_TASK(cmd)->t_transport_stop), - atomic_read(&T_TASK(cmd)->t_transport_sent)); + cmd->se_tfo->get_task_tag(cmd), + cmd->t_task->t_task_cdbs, + atomic_read(&cmd->t_task->t_task_cdbs_left), + atomic_read(&cmd->t_task->t_task_cdbs_sent), + atomic_read(&cmd->t_task->t_transport_active), + atomic_read(&cmd->t_task->t_transport_stop), + atomic_read(&cmd->t_task->t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); DEBUG_DO("Waiting for task: %p to shutdown for dev:" " %p\n", task, dev); @@ -5965,8 +5842,8 @@ static void transport_processing_shutdown(struct se_device *dev) DEBUG_DO("Completed task: %p shutdown for dev: %p\n", task, dev); - spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags); - atomic_dec(&T_TASK(cmd)->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + atomic_dec(&cmd->t_task->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -5976,39 +5853,39 @@ static void transport_processing_shutdown(struct se_device *dev) } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) { + if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); DEBUG_DO("Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - if (atomic_read(&T_TASK(cmd)->t_transport_active)) { + if (atomic_read(&cmd->t_task->t_transport_active)) { DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" " %p\n", task, dev); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + if (atomic_read(&cmd->t_task->t_fe_count)) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); transport_send_check_condition_and_sense( cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_lun->lun_se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_lun->lun_se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); @@ -6022,22 +5899,22 @@ static void transport_processing_shutdown(struct se_device *dev) DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", task, dev); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + if (atomic_read(&cmd->t_task->t_fe_count)) { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_lun->lun_se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &T_TASK(cmd)->t_state_lock, flags); + &cmd->t_task->t_state_lock, flags); 
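
The '&cmd->se_lun->lun_se_dev->dev_queue_obj' hunks above follow from "Make dev_queue_obj a member of se_device instead of a pointer": the queue object is now embedded in struct se_device, so call sites take its address with '&' and a separate allocation/free pair disappears. Abbreviated sketch of the layout change (struct contents reduced to one field, names shortened):

struct queue_obj { int queue_cnt; };

/* Before: allocated separately and reached through a pointer. */
struct se_device_old { struct queue_obj *dev_queue_obj; };

/* After: embedded, sharing the device's allocation and lifetime;
 * callers now pass &dev->dev_queue_obj instead of the bare pointer. */
struct se_device_new { struct queue_obj dev_queue_obj; };

static int queue_count(struct se_device_new *dev)
{
        return dev->dev_queue_obj.queue_cnt;    /* was: ->dev_queue_obj-> */
}

Embedding also removes a NULL case: the queue object exists exactly as long as the device does.
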
transport_remove_cmd_from_queue(cmd, - SE_DEV(cmd)->dev_queue_obj); + &cmd->se_lun->lun_se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) @@ -6050,18 +5927,15 @@ static void transport_processing_shutdown(struct se_device *dev) /* * Empty the struct se_device's struct se_cmd list. */ - spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); - while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) { - spin_unlock_irqrestore( - &dev->dev_queue_obj->cmd_queue_lock, flags); - cmd = (struct se_cmd *)qr->cmd; + while ((qr = transport_get_qr_from_queue(&dev->dev_queue_obj))) { + cmd = qr->cmd; state = qr->state; kfree(qr); DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", cmd, state); - if (atomic_read(&T_TASK(cmd)->t_fe_count)) { + if (atomic_read(&cmd->t_task->t_fe_count)) { transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); @@ -6072,9 +5946,7 @@ static void transport_processing_shutdown(struct se_device *dev) if (transport_cmd_check_stop(cmd, 1, 0)) transport_generic_remove(cmd, 0, 0); } - spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags); } - spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags); } /* transport_processing_thread(): @@ -6091,8 +5963,8 @@ static int transport_processing_thread(void *param) set_user_nice(current, -20); while (!kthread_should_stop()) { - ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq, - atomic_read(&dev->dev_queue_obj->queue_cnt) || + ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq, + atomic_read(&dev->dev_queue_obj.queue_cnt) || kthread_should_stop()); if (ret < 0) goto out; @@ -6108,22 +5980,22 @@ static int transport_processing_thread(void *param) get_cmd: __transport_execute_tasks(dev); - qr = transport_get_qr_from_queue(dev->dev_queue_obj); + qr = transport_get_qr_from_queue(&dev->dev_queue_obj); if (!(qr)) continue; - cmd = (struct se_cmd *)qr->cmd; + cmd = qr->cmd; t_state = qr->state; kfree(qr); switch (t_state) { case TRANSPORT_NEW_CMD_MAP: - if (!(CMD_TFO(cmd)->new_cmd_map)) { - printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is" + if (!(cmd->se_tfo->new_cmd_map)) { + printk(KERN_ERR "cmd->se_tfo->new_cmd_map is" " NULL for TRANSPORT_NEW_CMD_MAP\n"); BUG(); } - ret = CMD_TFO(cmd)->new_cmd_map(cmd); + ret = cmd->se_tfo->new_cmd_map(cmd); if (ret < 0) { cmd->transport_error_status = ret; transport_generic_request_failure(cmd, NULL, @@ -6168,9 +6040,9 @@ get_cmd: printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" " %d for ITT: 0x%08x i_state: %d on SE LUN:" " %u\n", t_state, cmd->deferred_t_state, - CMD_TFO(cmd)->get_task_tag(cmd), - CMD_TFO(cmd)->get_cmd_state(cmd), - SE_LUN(cmd)->unpacked_lun); + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), + cmd->se_lun->unpacked_lun); BUG(); } diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index df355176a377..16f41d188e26 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -80,10 +80,10 @@ int core_scsi3_ua_check( case REQUEST_SENSE: return 0; default: - return -1; + return -EINVAL; } - return -1; + return -EINVAL; } int core_scsi3_ua_allocate( @@ -98,12 +98,12 @@ int core_scsi3_ua_allocate( * PASSTHROUGH OPS */ if (!(nacl)) - return -1; + return -EINVAL; ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); if (!(ua)) { printk(KERN_ERR "Unable to allocate struct se_ua\n"); - return -1; + return -ENOMEM; } INIT_LIST_HEAD(&ua->ua_dev_list); INIT_LIST_HEAD(&ua->ua_nacl_list); @@ -179,7 
+179,7 @@ int core_scsi3_ua_allocate( printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" " 0x%02x, ASCQ: 0x%02x\n", - TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun, + nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, asc, ascq); atomic_inc(&deve->ua_count); @@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition( u8 *asc, u8 *ascq) { - struct se_device *dev = SE_DEV(cmd); + struct se_device *dev = cmd->se_lun->lun_se_dev; struct se_dev_entry *deve; struct se_session *sess = cmd->se_sess; struct se_node_acl *nacl; @@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition( * highest priority UNIT_ATTENTION and ASC/ASCQ without * clearing it. */ - if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) { + if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) { *asc = ua->ua_asc; *ascq = ua->ua_ascq; break; @@ -267,10 +267,10 @@ void core_scsi3_ua_for_check_condition( printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" " reported ASC: 0x%02x, ASCQ: 0x%02x\n", - TPG_TFO(nacl->se_tpg)->get_fabric_name(), - (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" : - "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl, - cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq); + nacl->se_tpg->se_tpg_tfo->get_fabric_name(), + (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : + "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, + cmd->orig_fe_lun, cmd->t_task->t_task_cdb[0], *asc, *ascq); } int core_scsi3_ua_clear_for_request_sense( @@ -285,17 +285,17 @@ int core_scsi3_ua_clear_for_request_sense( int head = 1; if (!(sess)) - return -1; + return -EINVAL; nacl = sess->se_node_acl; if (!(nacl)) - return -1; + return -EINVAL; spin_lock_irq(&nacl->device_list_lock); deve = &nacl->device_list[cmd->orig_fe_lun]; if (!(atomic_read(&deve->ua_count))) { spin_unlock_irq(&nacl->device_list_lock); - return -1; + return -EPERM; } /* * The highest priority Unit Attentions are placed at the head of the @@ -325,8 +325,8 @@ int core_scsi3_ua_clear_for_request_sense( printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," - " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(), + " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), cmd->orig_fe_lun, *asc, *ascq); - return (head) ? -1 : 0; + return (head) ? 
-EPERM : 0; } diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 92a449aededa..19b2b9948314 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -72,7 +72,7 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) caller, cmd, cmd->cdb); printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); - task = T_TASK(se_cmd); + task = se_cmd->t_task; printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", caller, cmd, task, task->t_tasks_se_num, task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); @@ -262,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd) * TCM/LIO target */ transport_do_task_sg_chain(se_cmd); - cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained; + cmd->sg = se_cmd->t_task->t_tasks_sg_chained; cmd->sg_cnt = - T_TASK(se_cmd)->t_tasks_sg_chained_no; + se_cmd->t_task->t_tasks_sg_chained_no; } if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, cmd->sg, cmd->sg_cnt)) @@ -670,7 +670,6 @@ static void ft_send_cmd(struct ft_cmd *cmd) err: ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); - return; } /* diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 84e868c255dd..8c5067c65720 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -582,10 +582,10 @@ int ft_register_configfs(void) * Register the top level struct config_item_type with TCM core */ fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); - if (!fabric) { + if (IS_ERR(fabric)) { printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n", __func__); - return -1; + return PTR_ERR(fabric); } fabric->tf_ops = ft_fabric_ops; diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 8c4a24077d9d..47efcfb9f4b8 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -90,7 +90,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) lport = ep->lp; cmd->seq = lport->tt.seq_start_next(cmd->seq); - task = T_TASK(se_cmd); + task = se_cmd->t_task; BUG_ON(!task); remaining = se_cmd->data_length; @@ -236,7 +236,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) u32 f_ctl; void *buf; - task = T_TASK(se_cmd); + task = se_cmd->t_task; BUG_ON(!task); fh = fc_frame_header_get(fp); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 561ac99def5a..b0b83edbe453 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -205,11 +205,6 @@ typedef enum { SCSI_INDEX_TYPE_MAX } scsi_index_t; -struct scsi_index_table { - spinlock_t lock; - u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; -} ____cacheline_aligned; - struct se_cmd; struct t10_alua { @@ -235,7 +230,7 @@ struct t10_alua_lu_gp { atomic_t lu_gp_ref_cnt; spinlock_t lu_gp_lock; struct config_group lu_gp_group; - struct list_head lu_gp_list; + struct list_head lu_gp_node; struct list_head lu_gp_mem_list; } ____cacheline_aligned; @@ -291,10 +286,10 @@ struct t10_vpd { } ____cacheline_aligned; struct t10_wwn { - unsigned char vendor[8]; - unsigned char model[16]; - unsigned char revision[4]; - unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN]; + char vendor[8]; + char model[16]; + char revision[4]; + char unit_serial[INQUIRY_VPD_SERIAL_LEN]; spinlock_t t10_vpd_lock; struct se_subsystem_dev *t10_sub_dev; struct config_group t10_wwn_group; @@ -366,13 +361,13 @@ struct t10_reservation_ops { int (*t10_pr_clear)(struct se_cmd *); }; -struct t10_reservation_template { +struct t10_reservation { /* 
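On the tfc_conf.c hunk above: target_fabric_configfs_init() reports failure by encoding an errno into the returned pointer, so the NULL test becomes IS_ERR() and the flat -1 becomes PTR_ERR(). A small self-contained sketch of that idiom; example_obj and its constructor are hypothetical:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_obj {
	int id;
};

/* Hypothetical constructor that reports failure through the pointer
 * itself, as target_fabric_configfs_init() does. */
static struct example_obj *example_obj_create(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* errno travels in the pointer */
	return obj;
}

static int example_register(void)
{
	struct example_obj *obj = example_obj_create();

	if (IS_ERR(obj))		/* a plain NULL test would miss this */
		return PTR_ERR(obj);	/* propagate the real reason */
	return 0;
}
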
Reservation effects all target ports */ int pr_all_tg_pt; /* Activate Persistence across Target Power Loss enabled * for SCSI device */ int pr_aptpl_active; - /* Used by struct t10_reservation_template->pr_aptpl_buf_len */ + /* Used by struct t10_reservation->pr_aptpl_buf_len */ #define PR_APTPL_BUF_LEN 8192 u32 pr_aptpl_buf_len; u32 pr_generation; @@ -397,7 +392,7 @@ struct t10_reservation_template { struct se_queue_req { int state; - void *cmd; + struct se_cmd *cmd; struct list_head qr_list; } ____cacheline_aligned; @@ -495,9 +490,6 @@ struct se_task { struct list_head t_state_list; } ____cacheline_aligned; -#define TASK_CMD(task) ((task)->task_se_cmd) -#define TASK_DEV(task) ((task)->se_dev) - struct se_cmd { /* SAM response code being sent to initiator */ u8 scsi_status; @@ -552,9 +544,6 @@ struct se_cmd { void (*transport_complete_callback)(struct se_cmd *); } ____cacheline_aligned; -#define T_TASK(cmd) ((cmd)->t_task) -#define CMD_TFO(cmd) ((cmd)->se_tfo) - struct se_tmr_req { /* Task Management function to be preformed */ u8 function; @@ -617,9 +606,6 @@ struct se_session { struct list_head sess_acl_list; } ____cacheline_aligned; -#define SE_SESS(cmd) ((cmd)->se_sess) -#define SE_NODE_ACL(sess) ((sess)->se_node_acl) - struct se_device; struct se_transform_info; struct scatterlist; @@ -640,8 +626,6 @@ struct se_lun_acl { struct se_ml_stat_grps ml_stat_grps; } ____cacheline_aligned; -#define ML_STAT_GRPS(lacl) (&(lacl)->ml_stat_grps) - struct se_dev_entry { bool def_pr_registered; /* See transport_lunflags_table */ @@ -727,10 +711,10 @@ struct se_subsystem_dev { /* T10 Inquiry and VPD WWN Information */ struct t10_wwn t10_wwn; /* T10 SPC-2 + SPC-3 Reservations */ - struct t10_reservation_template t10_reservation; + struct t10_reservation t10_pr; spinlock_t se_dev_lock; void *se_dev_su_ptr; - struct list_head g_se_dev_list; + struct list_head se_dev_node; struct config_group se_dev_group; /* For T10 Reservations */ struct config_group se_dev_pr_group; @@ -738,11 +722,6 @@ struct se_subsystem_dev { struct se_dev_stat_grps dev_stat_grps; } ____cacheline_aligned; -#define T10_ALUA(su_dev) (&(su_dev)->t10_alua) -#define T10_RES(su_dev) (&(su_dev)->t10_reservation) -#define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops) -#define DEV_STAT_GRP(dev) (&(dev)->dev_stat_grps) - struct se_device { /* Set to 1 if thread is NOT sleeping on thread_sem */ u8 thread_active; @@ -783,8 +762,7 @@ struct se_device { struct se_obj dev_obj; struct se_obj dev_access_obj; struct se_obj dev_export_obj; - struct se_queue_obj *dev_queue_obj; - struct se_queue_obj *dev_status_queue_obj; + struct se_queue_obj dev_queue_obj; spinlock_t delayed_cmd_lock; spinlock_t ordered_cmd_lock; spinlock_t execute_task_lock; @@ -824,11 +802,6 @@ struct se_device { struct list_head g_se_dev_list; } ____cacheline_aligned; -#define SE_DEV(cmd) ((cmd)->se_lun->lun_se_dev) -#define SU_DEV(dev) ((dev)->se_sub_dev) -#define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib) -#define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn) - struct se_hba { u16 hba_tpgt; u32 hba_id; @@ -837,24 +810,17 @@ struct se_hba { /* Virtual iSCSI devices attached. */ u32 dev_count; u32 hba_index; - atomic_t load_balance_queue; - atomic_t left_queue_depth; - /* Maximum queue depth the HBA can handle. */ - atomic_t max_queue_depth; /* Pointer to transport specific host structure. 
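The target_core_base.h hunks around this point delete one-line accessor macros such as TASK_CMD(), T_TASK() and CMD_TFO() (and, further down, SE_DEV(), TPG_TFO() and friends) so that call sites spell out the member access the macros were hiding. The shape of that cleanup, with stand-in types:

/* Stand-in types; the real structs live in target_core_base.h. */
struct example_ops {
	const char *(*get_fabric_name)(void);
};

struct example_cmd {
	struct example_ops *se_tfo;
};

/* Old style: a macro that only hides a single member access. */
#define CMD_OPS(cmd) ((cmd)->se_tfo)

static const char *old_style(struct example_cmd *cmd)
{
	return CMD_OPS(cmd)->get_fabric_name();
}

/* New style after this series: say what you mean at the call site. */
static const char *new_style(struct example_cmd *cmd)
{
	return cmd->se_tfo->get_fabric_name();
}
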
*/ void *hba_ptr; /* Linked list for struct se_device */ struct list_head hba_dev_list; - struct list_head hba_list; + struct list_head hba_node; spinlock_t device_lock; - spinlock_t hba_queue_lock; struct config_group hba_group; struct mutex hba_access_mutex; struct se_subsystem_api *transport; } ____cacheline_aligned; -#define SE_HBA(dev) ((dev)->se_hba) - struct se_port_stat_grps { struct config_group stat_group; struct config_group scsi_port_group; @@ -881,9 +847,6 @@ struct se_lun { struct se_port_stat_grps port_stat_grps; } ____cacheline_aligned; -#define SE_LUN(cmd) ((cmd)->se_lun) -#define PORT_STAT_GRP(lun) (&(lun)->port_stat_grps) - struct scsi_port_stats { u64 cmd_pdus; u64 tx_data_octets; @@ -930,7 +893,7 @@ struct se_portal_group { spinlock_t tpg_lun_lock; /* Pointer to $FABRIC_MOD portal group */ void *se_tpg_fabric_ptr; - struct list_head se_tpg_list; + struct list_head se_tpg_node; /* linked list for initiator ACL list */ struct list_head acl_node_list; struct se_lun *tpg_lun_list; @@ -949,8 +912,6 @@ struct se_portal_group { struct config_group tpg_param_group; } ____cacheline_aligned; -#define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo) - struct se_wwn { struct target_fabric_configfs *wwn_tf; struct config_group wwn_group; @@ -958,28 +919,4 @@ struct se_wwn { struct config_group fabric_stat_group; } ____cacheline_aligned; -struct se_global { - u16 alua_lu_gps_counter; - int g_sub_api_initialized; - u32 in_shutdown; - u32 alua_lu_gps_count; - u32 g_hba_id_counter; - struct config_group target_core_hbagroup; - struct config_group alua_group; - struct config_group alua_lu_gps_group; - struct list_head g_lu_gps_list; - struct list_head g_se_tpg_list; - struct list_head g_hba_list; - struct list_head g_se_dev_list; - struct se_hba *g_lun0_hba; - struct se_subsystem_dev *g_lun0_su_dev; - struct se_device *g_lun0_dev; - struct t10_alua_lu_gp *default_lu_gp; - spinlock_t g_device_lock; - spinlock_t hba_lock; - spinlock_t se_tpg_lock; - spinlock_t lu_gps_lock; - spinlock_t plugin_class_lock; -} ____cacheline_aligned; - #endif /* TARGET_CORE_BASE_H */ diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 24a1c6cb83c3..1dd4d1841497 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -111,9 +111,8 @@ struct se_subsystem_api; extern struct kmem_cache *se_mem_cache; -extern int init_se_global(void); -extern void release_se_global(void); -extern void init_scsi_index_table(void); +extern int init_se_kmem_caches(void); +extern void release_se_kmem_caches(void); extern u32 scsi_get_new_index(scsi_index_t); extern void transport_init_queue_obj(struct se_queue_obj *); extern int transport_subsystem_check_init(void); @@ -184,7 +183,7 @@ extern void transport_send_task_abort(struct se_cmd *); extern void transport_release_cmd_to_pool(struct se_cmd *); extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); extern void transport_generic_wait_for_cmds(struct se_cmd *, int); -extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32); +extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, void *, struct se_mem *, struct se_mem **, u32 *, u32 *); @@ -352,9 +351,4 @@ struct se_subsystem_api { unsigned char *(*get_sense_buffer)(struct se_task *); } ____cacheline_aligned; -#define TRANSPORT(dev) ((dev)->transport) -#define HBA_TRANSPORT(hba) ((hba)->transport) - -extern struct se_global 
*se_global; - #endif /* TARGET_CORE_TRANSPORT_H */ -- cgit v1.2.3 From 5951146dea1ac8ff2f177477c907084d63913cad Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 19 Jul 2011 10:26:37 +0000 Subject: target: More core cleanups from AGrover (round 2) This patch contains the squashed version of the second round of target core cleanups and simplifications from Andy and Co. It also contains a handful of fixes to address bugs in the original series, and other minor cleanups. Here is the condensed shortlog: target: Remove unneeded casts to void* target: Rename get_lun_for_{cmd,tmr} to lookup_{cmd,tmr}_lun target: Make t_task a member of se_cmd, not a pointer target: Handle functions returning "-2" target: Use cmd->se_dev over cmd->se_lun->lun_se_dev target: Embed qr in struct se_cmd target: Replace embedded struct se_queue_req with a list_head target: Rename list_heads that are nodes in struct se_cmd to "*_node" target: Fold transport_device_setup_cmd() into lookup_{tmr,cmd}_lun() target: Make t_mem_list and t_mem_list_bidi members of t_task target: Add comment & cleanup transport_map_sg_to_mem() target: Remove unneeded checks in transport_free_pages() (Roland: Fix se_queue_req removal leftovers OOPs) (nab: Fix transport_lookup_tmr_lun failure case) (nab: Fix list_empty(&cmd->t_task.t_mem_bidi_list) inversion bugs) Signed-off-by: Andy Grover Signed-off-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- drivers/target/loopback/tcm_loop.c | 56 +- drivers/target/target_core_alua.c | 8 +- drivers/target/target_core_cdb.c | 62 +- drivers/target/target_core_configfs.c | 4 +- drivers/target/target_core_device.c | 167 +++-- drivers/target/target_core_file.c | 12 +- drivers/target/target_core_iblock.c | 14 +- drivers/target/target_core_pr.c | 86 +-- drivers/target/target_core_pscsi.c | 20 +- drivers/target/target_core_rd.c | 14 +- drivers/target/target_core_tmr.c | 89 +-- drivers/target/target_core_transport.c | 1106 ++++++++++++++------------ drivers/target/target_core_ua.c | 4 +- drivers/target/tcm_fc/tfc_cmd.c | 26 +- drivers/target/tcm_fc/tfc_conf.c | 2 +- drivers/target/tcm_fc/tfc_io.c | 10 +- include/target/target_core_base.h | 17 +- include/target/target_core_device.h | 4 +- include/target/target_core_transport.h | 1 - 19 files changed, 761 insertions(+), 941 deletions(-) (limited to 'include') diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 2f19e1926493..eeb7ee7ab9f7 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -118,17 +118,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd( * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi */ if (scsi_bidi_cmnd(sc)) - se_cmd->t_task->t_tasks_bidi = 1; + se_cmd->t_task.t_tasks_bidi = 1; /* * Locate the struct se_lun pointer and attach it to struct se_cmd */ - if (transport_get_lun_for_cmd(se_cmd, tl_cmd->sc->device->lun) < 0) { + if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) { kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); set_host_byte(sc, DID_NO_CONNECT); return NULL; } - transport_device_setup_cmd(se_cmd); return se_cmd; } @@ -143,17 +142,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; - void *mem_ptr, *mem_bidi_ptr = NULL; - u32 sg_no_bidi = 0; + struct scatterlist *sgl_bidi = NULL; + u32 sgl_bidi_count = 0; int ret; /* * Allocate the necessary tasks to complete the received CDB+data */ - ret =
transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd); - if (ret == -1) { + ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd); + if (ret == -ENOMEM) { /* Out of Resources */ return PYX_TRANSPORT_LU_COMM_FAILURE; - } else if (ret == -2) { + } else if (ret == -EINVAL) { /* * Handle case for SAM_STAT_RESERVATION_CONFLICT */ @@ -165,35 +164,24 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) */ return PYX_TRANSPORT_USE_SENSE_REASON; } + /* - * Setup the struct scatterlist memory from the received - * struct scsi_cmnd. + * For BIDI commands, pass in the extra READ buffer + * to transport_generic_map_mem_to_cmd() below.. */ - if (scsi_sg_count(sc)) { - se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM; - mem_ptr = (void *)scsi_sglist(sc); - /* - * For BIDI commands, pass in the extra READ buffer - * to transport_generic_map_mem_to_cmd() below.. - */ - if (se_cmd->t_task->t_tasks_bidi) { - struct scsi_data_buffer *sdb = scsi_in(sc); + if (se_cmd->t_task.t_tasks_bidi) { + struct scsi_data_buffer *sdb = scsi_in(sc); - mem_bidi_ptr = (void *)sdb->table.sgl; - sg_no_bidi = sdb->table.nents; - } - } else { - /* - * Used for DMA_NONE - */ - mem_ptr = NULL; + sgl_bidi = sdb->table.sgl; + sgl_bidi_count = sdb->table.nents; } + /* * Map the SG memory into struct se_mem->page linked list using the same * physical memory at sg->page_link. */ - ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr, - scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi); + ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), + scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); if (ret < 0) return PYX_TRANSPORT_LU_COMM_FAILURE; @@ -384,14 +372,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) /* * Allocate the LUN_RESET TMR */ - se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr, + se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, TMR_LUN_RESET); if (IS_ERR(se_cmd->se_tmr_req)) goto release; /* * Locate the underlying TCM struct se_lun from sc->device->lun */ - if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0) + if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0) goto release; /* * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp() @@ -904,7 +892,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) || (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) { - memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer, + memcpy(sc->sense_buffer, se_cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); sc->result = SAM_STAT_CHECK_CONDITION; set_driver_byte(sc, DRIVER_SENSE); @@ -1054,7 +1042,7 @@ static int tcm_loop_make_nexus( * transport_register_session() */ __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, - tl_nexus->se_sess, (void *)tl_nexus); + tl_nexus->se_sess, tl_nexus); tl_tpg->tl_hba->tl_nexus = tl_nexus; printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), @@ -1242,7 +1230,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg( * Register the tl_tpg as a emulated SAS TCM Target Endpoint */ ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops, - wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg, + wwn, &tl_tpg->tl_se_tpg, tl_tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) return ERR_PTR(-ENOMEM); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index bfc42adea510..76abd86b6a73 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ 
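With the errno conversion from round 1, the tcm_loop_new_cmd_map() hunk above can key its error handling off -ENOMEM and -EINVAL instead of the old -1/-2 magic values. A condensed sketch of that dispatch; the EXAMPLE_* constants merely stand in for the PYX_TRANSPORT_* codes:

#include <linux/errno.h>

/* Illustrative response codes standing in for the PYX_TRANSPORT_* values. */
enum example_response {
	EXAMPLE_OK = 0,
	EXAMPLE_LU_COMM_FAILURE,
	EXAMPLE_USE_SENSE_REASON,
};

static enum example_response example_dispatch(int ret)
{
	if (ret == -ENOMEM)		/* out of resources */
		return EXAMPLE_LU_COMM_FAILURE;
	if (ret == -EINVAL)		/* e.g. a reservation conflict */
		return EXAMPLE_USE_SENSE_REASON;
	return EXAMPLE_OK;		/* ret == 0: tasks allocated */
}
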
-61,11 +61,11 @@ struct t10_alua_lu_gp *default_lu_gp; */ int core_emulate_report_target_port_groups(struct se_cmd *cmd) { - struct se_subsystem_dev *su_dev = cmd->se_lun->lun_se_dev->se_sub_dev; + struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct se_port *port; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first Target port group descriptor */ @@ -151,13 +151,13 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) */ int core_emulate_set_target_port_groups(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_subsystem_dev *su_dev = dev->se_sub_dev; struct se_port *port, *l_port = cmd->se_lun->lun_sep; struct se_node_acl *nacl = cmd->se_sess->se_node_acl; struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */ u32 len = 4; /* Skip over RESERVED area in header */ int alua_access_state, primary = 0, rc; diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 7d9ccf3aa9c3..95195d718101 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -65,8 +65,8 @@ static int target_emulate_inquiry_std(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf; /* * Make sure we at least have 6 bytes of INQUIRY response @@ -128,7 +128,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) * Registered Extended LUN WWN has been set via ConfigFS * during device creation/restart. 
*/ - if (cmd->se_lun->lun_se_dev->se_sub_dev->su_dev_flags & + if (cmd->se_dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL) { buf[3] = 3; buf[5] = 0x80; @@ -143,7 +143,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; u16 len = 0; buf[1] = 0x80; @@ -176,7 +176,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_lun *lun = cmd->se_lun; struct se_port *port = NULL; struct se_portal_group *tpg = NULL; @@ -477,7 +477,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) buf[5] = 0x07; /* If WriteCache emulation is enabled, set V_SUP */ - if (cmd->se_lun->lun_se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) + if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) buf[6] = 0x01; return 0; } @@ -486,7 +486,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; int have_tp = 0; /* @@ -568,7 +568,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * From sbc3r22 section 6.5.4 Thin Provisioning VPD page: @@ -620,9 +620,9 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) static int target_emulate_inquiry(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf; - unsigned char *cdb = cmd->t_task->t_task_cdb; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf; + unsigned char *cdb = cmd->t_task.t_task_cdb; if (!(cdb[1] & 0x1)) return target_emulate_inquiry_std(cmd); @@ -665,8 +665,8 @@ target_emulate_inquiry(struct se_cmd *cmd) static int target_emulate_readcapacity(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); u32 blocks; @@ -695,8 +695,8 @@ target_emulate_readcapacity(struct se_cmd *cmd) static int target_emulate_readcapacity_16(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf; unsigned long long blocks = dev->transport->get_blocks(dev); buf[0] = (blocks >> 56) & 0xff; @@ -830,9 +830,9 @@ target_modesense_dpofua(unsigned char *buf, int type) static int target_emulate_modesense(struct se_cmd *cmd, int ten) { - struct se_device *dev = cmd->se_lun->lun_se_dev; - char *cdb = cmd->t_task->t_task_cdb; - unsigned char *rbuf = cmd->t_task->t_task_buf; + struct se_device *dev = cmd->se_dev; + char *cdb = cmd->t_task.t_task_cdb; + unsigned char *rbuf = cmd->t_task.t_task_buf; int type = dev->transport->get_device_type(dev); int offset = (ten) ? 
8 : 4; int length = 0; @@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) static int target_emulate_request_sense(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task->t_task_cdb; - unsigned char *buf = cmd->t_task->t_task_buf; + unsigned char *cdb = cmd->t_task.t_task_cdb; + unsigned char *buf = cmd->t_task.t_task_buf; u8 ua_asc = 0, ua_ascq = 0; if (cdb[1] & 0x01) { @@ -964,9 +964,9 @@ static int target_emulate_unmap(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = cmd->se_lun->lun_se_dev; - unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL; - unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + struct se_device *dev = cmd->se_dev; + unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL; + unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; sector_t lba; unsigned int size = cmd->data_length, range; int ret, offset; @@ -1011,8 +1011,8 @@ static int target_emulate_write_same(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = cmd->se_lun->lun_se_dev; - sector_t lba = cmd->t_task->t_task_lba; + struct se_device *dev = cmd->se_dev; + sector_t lba = cmd->t_task.t_task_lba; unsigned int range; int ret; @@ -1036,11 +1036,11 @@ int transport_emulate_control_cdb(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; unsigned short service_action; int ret = 0; - switch (cmd->t_task->t_task_cdb[0]) { + switch (cmd->t_task.t_task_cdb[0]) { case INQUIRY: ret = target_emulate_inquiry(cmd); break; @@ -1054,13 +1054,13 @@ transport_emulate_control_cdb(struct se_task *task) ret = target_emulate_modesense(cmd, 1); break; case SERVICE_ACTION_IN: - switch (cmd->t_task->t_task_cdb[1] & 0x1f) { + switch (cmd->t_task.t_task_cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: ret = target_emulate_readcapacity_16(cmd); break; default: printk(KERN_ERR "Unsupported SA: 0x%02x\n", - cmd->t_task->t_task_cdb[1] & 0x1f); + cmd->t_task.t_task_cdb[1] & 0x1f); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } break; @@ -1085,7 +1085,7 @@ transport_emulate_control_cdb(struct se_task *task) break; case VARIABLE_LENGTH_CMD: service_action = - get_unaligned_be16(&cmd->t_task->t_task_cdb[8]); + get_unaligned_be16(&cmd->t_task.t_task_cdb[8]); switch (service_action) { case WRITE_SAME_32: if (!dev->transport->do_discard) { @@ -1124,7 +1124,7 @@ transport_emulate_control_cdb(struct se_task *task) break; default: printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", - cmd->t_task->t_task_cdb[0], dev->transport->name); + cmd->t_task.t_task_cdb[0], dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 64418efa671b..ac7f7655570e 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -2037,7 +2037,7 @@ static ssize_t target_core_dev_show(struct config_item *item, if (!(tc_attr->show)) return -EINVAL; - return tc_attr->show((void *)se_dev, page); + return tc_attr->show(se_dev, page); } static ssize_t target_core_dev_store(struct config_item *item, @@ -2053,7 +2053,7 @@ static ssize_t target_core_dev_store(struct config_item *item, if (!(tc_attr->store)) return -EINVAL; - return tc_attr->store((void *)se_dev, page, count); + return tc_attr->store(se_dev, page, count); } static struct configfs_item_operations target_core_dev_item_ops = { diff --git a/drivers/target/target_core_device.c 
b/drivers/target/target_core_device.c index fd923854505c..ea92f75d215e 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -59,15 +59,12 @@ static struct se_subsystem_dev *lun0_su_dev; /* not static, needed by tpg.c */ struct se_device *g_lun0_dev; -int transport_get_lun_for_cmd( - struct se_cmd *se_cmd, - u32 unpacked_lun) +int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) { - struct se_dev_entry *deve; struct se_lun *se_lun = NULL; struct se_session *se_sess = se_cmd->se_sess; + struct se_device *dev; unsigned long flags; - int read_only = 0; if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; @@ -76,91 +73,87 @@ int transport_get_lun_for_cmd( } spin_lock_irq(&se_sess->se_node_acl->device_list_lock); - deve = se_cmd->se_deve = - &se_sess->se_node_acl->device_list[unpacked_lun]; - if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { - if (se_cmd) { - deve->total_cmds++; - deve->total_bytes += se_cmd->data_length; - - if (se_cmd->data_direction == DMA_TO_DEVICE) { - if (deve->lun_flags & - TRANSPORT_LUNFLAGS_READ_ONLY) { - read_only = 1; - goto out; - } - deve->write_bytes += se_cmd->data_length; - } else if (se_cmd->data_direction == - DMA_FROM_DEVICE) { - deve->read_bytes += se_cmd->data_length; - } + se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; + if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { + struct se_dev_entry *deve = se_cmd->se_deve; + + deve->total_cmds++; + deve->total_bytes += se_cmd->data_length; + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { + se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + " Access for 0x%08x\n", + se_cmd->se_tfo->get_fabric_name(), + unpacked_lun); + spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); + return -EACCES; } + + if (se_cmd->data_direction == DMA_TO_DEVICE) + deve->write_bytes += se_cmd->data_length; + else if (se_cmd->data_direction == DMA_FROM_DEVICE) + deve->read_bytes += se_cmd->data_length; + deve->deve_cmds++; - se_lun = se_cmd->se_lun = deve->se_lun; + se_lun = deve->se_lun; + se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } -out: spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); if (!se_lun) { - if (read_only) { - se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + /* + * Use the se_portal_group->tpg_virt_lun0 to allow for + * REPORT_LUNS, et al to be returned when no active + * MappedLUN=0 exists for this Initiator Port. 
+ */ + if (unpacked_lun != 0) { + se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", se_cmd->se_tfo->get_fabric_name(), unpacked_lun); + return -ENODEV; + } + /* + * Force WRITE PROTECT for virtual LUN 0 + */ + if ((se_cmd->data_direction != DMA_FROM_DEVICE) && + (se_cmd->data_direction != DMA_NONE)) { + se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; + se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -EACCES; - } else { - /* - * Use the se_portal_group->tpg_virt_lun0 to allow for - * REPORT_LUNS, et al to be returned when no active - * MappedLUN=0 exists for this Initiator Port. - */ - if (unpacked_lun != 0) { - se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" - " Access for 0x%08x\n", - se_cmd->se_tfo->get_fabric_name(), - unpacked_lun); - return -ENODEV; - } - /* - * Force WRITE PROTECT for virtual LUN 0 - */ - if ((se_cmd->data_direction != DMA_FROM_DEVICE) && - (se_cmd->data_direction != DMA_NONE)) { - se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; - se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - return -EACCES; - } -#if 0 - printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n", - se_cmd->se_tfo->get_fabric_name()); -#endif - se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; - se_cmd->orig_fe_lun = 0; - se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; - se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } + + se_lun = &se_sess->se_tpg->tpg_virt_lun0; + se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0; + se_cmd->orig_fe_lun = 0; + se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; } /* * Determine if the struct se_lun is online. + * FIXME: Check for LUN_RESET + UNIT Attention */ -/* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -ENODEV; } - { - struct se_device *dev = se_lun->lun_se_dev; + /* Directly associate cmd with se_dev */ + se_cmd->se_dev = se_lun->lun_se_dev; + + /* TODO: get rid of this and use atomics for stats */ + dev = se_lun->lun_se_dev; spin_lock_irq(&dev->stats_lock); dev->num_cmds++; if (se_cmd->data_direction == DMA_TO_DEVICE) @@ -168,30 +161,22 @@ out: else if (se_cmd->data_direction == DMA_FROM_DEVICE) dev->read_bytes += se_cmd->data_length; spin_unlock_irq(&dev->stats_lock); - } /* * Add the iscsi_cmd_t to the struct se_lun's cmd list. This list is used * for tracking state of struct se_cmds during LUN shutdown events. 
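To summarize the transport_lookup_cmd_lun() rewrite above: the nested branches that stashed failure in a read_only flag and decoded it after the fact become early returns at the point of detection, and the command now caches its backing device in cmd->se_dev. The refactoring pattern in miniature, with ex_cmd as a stand-in type:

#include <linux/errno.h>

struct ex_cmd {
	int has_access;
	int is_write;
	int write_protected;
	int stats;
};

/* Before: record the failure, fall through, decode the flag at the end. */
static int lookup_flag_style(struct ex_cmd *c)
{
	int read_only = 0;

	if (c->has_access) {
		if (c->is_write && c->write_protected) {
			read_only = 1;
			goto out;	/* skip the accounting below */
		}
		c->stats++;
	}
out:
	if (read_only)
		return -EACCES;		/* decoded far from where it was detected */
	if (!c->has_access)
		return -ENODEV;
	return 0;
}

/* After: each failure is reported right where it is detected. */
static int lookup_early_return(struct ex_cmd *c)
{
	if (!c->has_access)
		return -ENODEV;
	if (c->is_write && c->write_protected)
		return -EACCES;
	c->stats++;
	return 0;
}
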
*/ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); - list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list); - atomic_set(&se_cmd->t_task->transport_lun_active, 1); -#if 0 - printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n", - se_cmd->se_tfo->get_task_tag(se_cmd), se_lun->unpacked_lun); -#endif + list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); + atomic_set(&se_cmd->t_task.transport_lun_active, 1); spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); return 0; } -EXPORT_SYMBOL(transport_get_lun_for_cmd); +EXPORT_SYMBOL(transport_lookup_cmd_lun); -int transport_get_lun_for_tmr( - struct se_cmd *se_cmd, - u32 unpacked_lun) +int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) { - struct se_device *dev = NULL; struct se_dev_entry *deve; struct se_lun *se_lun = NULL; struct se_session *se_sess = se_cmd->se_sess; @@ -204,15 +189,16 @@ int transport_get_lun_for_tmr( } spin_lock_irq(&se_sess->se_node_acl->device_list_lock); - deve = se_cmd->se_deve = - &se_sess->se_node_acl->device_list[unpacked_lun]; + se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun]; + deve = se_cmd->se_deve; + if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { - se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun; - dev = se_lun->lun_se_dev; + se_tmr->tmr_lun = deve->se_lun; + se_cmd->se_lun = deve->se_lun; + se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; - se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev; -/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */ + se_cmd->se_orig_obj_ptr = se_cmd->se_dev; } spin_unlock_irq(&se_sess->se_node_acl->device_list_lock); @@ -226,21 +212,24 @@ int transport_get_lun_for_tmr( } /* * Determine if the struct se_lun is online. + * FIXME: Check for LUN_RESET + UNIT Attention */ -/* #warning FIXME: Check for LUN_RESET + UNIT Attention */ if (se_dev_check_online(se_lun->lun_se_dev) != 0) { se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; return -ENODEV; } - se_tmr->tmr_dev = dev; - spin_lock(&dev->se_tmr_lock); - list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list); - spin_unlock(&dev->se_tmr_lock); + /* Directly associate cmd with se_dev */ + se_cmd->se_dev = se_lun->lun_se_dev; + se_tmr->tmr_dev = se_lun->lun_se_dev; + + spin_lock(&se_tmr->tmr_dev->se_tmr_lock); + list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list); + spin_unlock(&se_tmr->tmr_dev->se_tmr_lock); return 0; } -EXPORT_SYMBOL(transport_get_lun_for_tmr); +EXPORT_SYMBOL(transport_lookup_tmr_lun); /* * This function is called from core_scsi3_emulate_pro_register_and_move() @@ -667,10 +656,10 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) struct se_lun *se_lun; struct se_session *se_sess = se_cmd->se_sess; struct se_task *se_task; - unsigned char *buf = se_cmd->t_task->t_task_buf; + unsigned char *buf = se_cmd->t_task.t_task_buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; - list_for_each_entry(se_task, &se_cmd->t_task->t_task_list, t_list) + list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list) break; if (!(se_task)) { diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 0c44bc051484..2e7ea7457501 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -223,7 +223,7 @@ static struct se_device *fd_create_virtdevice( dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH; dev = transport_add_device_to_core_hba(hba, &fileio_template, - se_dev, dev_flags, (void *)fd_dev, + se_dev, dev_flags, fd_dev, &dev_limits, 
"FILEIO", FD_VERSION); if (!(dev)) goto fail; @@ -279,7 +279,7 @@ fd_alloc_task(struct se_cmd *cmd) return NULL; } - fd_req->fd_dev = cmd->se_lun->lun_se_dev->dev_ptr; + fd_req->fd_dev = cmd->se_dev->dev_ptr; return &fd_req->fd_task; } @@ -377,7 +377,7 @@ static void fd_emulate_sync_cache(struct se_task *task) struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; - int immed = (cmd->t_task->t_task_cdb[1] & 0x2); + int immed = (cmd->t_task.t_task_cdb[1] & 0x2); loff_t start, end; int ret; @@ -391,11 +391,11 @@ static void fd_emulate_sync_cache(struct se_task *task) /* * Determine if we will be flushing the entire device. */ - if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) { + if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) { start = 0; end = LLONG_MAX; } else { - start = cmd->t_task->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; + start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; if (cmd->data_length) end = start + cmd->data_length; else @@ -475,7 +475,7 @@ static int fd_do_task(struct se_task *task) if (ret > 0 && dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - cmd->t_task->t_tasks_fua) { + cmd->t_task.t_tasks_fua) { /* * We might need to be a bit smarter here * and return some sense data to let the initiator diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index fb159876fffc..c73baefeab8e 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -74,7 +74,7 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id) ib_host->iblock_host_id = host_id; - hba->hba_ptr = (void *) ib_host; + hba->hba_ptr = ib_host; printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, @@ -172,7 +172,7 @@ static struct se_device *iblock_create_virtdevice( ib_dev->ibd_bd = bd; dev = transport_add_device_to_core_hba(hba, - &iblock_template, se_dev, dev_flags, (void *)ib_dev, + &iblock_template, se_dev, dev_flags, ib_dev, &dev_limits, "IBLOCK", IBLOCK_VERSION); if (!(dev)) goto failed; @@ -240,7 +240,7 @@ iblock_alloc_task(struct se_cmd *cmd) return NULL; } - ib_req->ib_dev = cmd->se_lun->lun_se_dev->dev_ptr; + ib_req->ib_dev = cmd->se_dev->dev_ptr; atomic_set(&ib_req->ib_bio_cnt, 0); return &ib_req->ib_task; } @@ -331,7 +331,7 @@ static void iblock_emulate_sync_cache(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; - int immed = (cmd->t_task->t_task_cdb[1] & 0x2); + int immed = (cmd->t_task.t_task_cdb[1] & 0x2); sector_t error_sector; int ret; @@ -400,7 +400,7 @@ static int iblock_do_task(struct se_task *task) */ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - task->task_se_cmd->t_task->t_tasks_fua)) + task->task_se_cmd->t_task.t_tasks_fua)) rw = WRITE_FUA; else rw = WRITE; @@ -593,7 +593,7 @@ static struct bio *iblock_get_bio( DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); bio->bi_bdev = ib_dev->ibd_bd; - bio->bi_private = (void *) task; + bio->bi_private = task; bio->bi_destructor = iblock_bio_destructor; bio->bi_end_io = &iblock_bio_done; bio->bi_sector = lba; @@ -608,7 +608,7 @@ static struct bio *iblock_get_bio( static int iblock_map_task_SG(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; - struct se_device *dev = 
cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct iblock_dev *ib_dev = task->se_dev->dev_ptr; struct iblock_req *ib_req = IBLOCK_REQ(task); struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 27a7525971b9..19406a3474c3 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; - if ((cmd->t_task->t_task_cdb[1] & 0x01) && - (cmd->t_task->t_task_cdb[1] & 0x02)) { + if ((cmd->t_task.t_task_cdb[1] & 0x01) && + (cmd->t_task.t_task_cdb[1] & 0x02)) { printk(KERN_ERR "LongIO and Obselete Bits set, returning" " ILLEGAL_REQUEST\n"); return PYX_TRANSPORT_ILLEGAL_REQUEST; @@ -216,7 +216,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; struct t10_reservation *pr_tmpl = &su_dev->t10_pr; - unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); int conflict = 0; @@ -1471,7 +1471,7 @@ static int core_scsi3_decode_spec_i_port( int all_tg_pt, int aptpl) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_port *tmp_port; struct se_portal_group *dest_tpg = NULL, *tmp_tpg; struct se_session *se_sess = cmd->se_sess; @@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port( struct list_head tid_dest_list; struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; struct target_core_fabric_ops *tmp_tf_ops; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tpdl, tid_len = 0; @@ -1509,7 +1509,7 @@ static int core_scsi3_decode_spec_i_port( tidh_new->dest_node_acl = se_sess->se_node_acl; tidh_new->dest_se_deve = local_se_deve; - local_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, + local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, local_se_deve, l_isid, sa_res_key, all_tg_pt, aptpl); if (!(local_pr_reg)) { @@ -1741,7 +1741,7 @@ static int core_scsi3_decode_spec_i_port( * and then call __core_scsi3_add_registration() in the * 2nd loop which will never fail. 
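The long run of t_task-> to t_task. conversions through these hunks follows from t_task becoming an embedded member of struct se_cmd rather than a separately allocated pointer: one less allocation, one less failure path, and no NULL case (the target_core_tmr.c hunk in this same patch drops exactly such a NULL check). A stand-in sketch of the before and after:

#include <linux/errno.h>

struct ex_task_state {
	int t_fe_count;
};

/* Before: separately allocated, so it can be NULL. */
struct ex_cmd_old {
	struct ex_task_state *t_task;
};

/* After: embedded, always valid for the lifetime of the command. */
struct ex_cmd_new {
	struct ex_task_state t_task;
};

static int read_count_old(struct ex_cmd_old *cmd)
{
	if (!cmd->t_task)		/* every consumer had to consider this */
		return -EINVAL;
	return cmd->t_task->t_fe_count;
}

static int read_count_new(struct ex_cmd_new *cmd)
{
	return cmd->t_task.t_fe_count;	/* no allocation, no NULL case */
}
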
*/ - dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, + dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, all_tg_pt, aptpl); if (!(dest_pr_reg)) { @@ -1787,7 +1787,7 @@ static int core_scsi3_decode_spec_i_port( prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); - __core_scsi3_add_registration(cmd->se_lun->lun_se_dev, dest_node_acl, + __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, dest_pr_reg, 0, 0); printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" @@ -2071,7 +2071,7 @@ static int core_scsi3_emulate_pro_register( int ignore_key) { struct se_session *se_sess = cmd->se_sess; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve; struct se_lun *se_lun = cmd->se_lun; struct se_portal_group *se_tpg; @@ -2117,7 +2117,7 @@ static int core_scsi3_emulate_pro_register( * Port Endpoint that the PRO was received from on the * Logical Unit of the SCSI device server. */ - ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, + ret = core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, se_deve, isid_ptr, sa_res_key, all_tg_pt, aptpl, ignore_key, 0); @@ -2145,7 +2145,7 @@ static int core_scsi3_emulate_pro_register( */ if (!(aptpl)) { pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); printk("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER\n"); return 0; @@ -2155,10 +2155,10 @@ static int core_scsi3_emulate_pro_register( * update the APTPL metadata information using its * preallocated *pr_reg->pr_aptpl_buf. */ - pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) { @@ -2223,7 +2223,7 @@ static int core_scsi3_emulate_pro_register( */ if (!(sa_res_key)) { pr_holder = core_scsi3_check_implict_release( - cmd->se_lun->lun_se_dev, pr_reg); + cmd->se_dev, pr_reg); if (pr_holder < 0) { kfree(pr_aptpl_buf); core_scsi3_put_pr_reg(pr_reg); @@ -2260,7 +2260,7 @@ static int core_scsi3_emulate_pro_register( /* * Release the calling I_T Nexus registration now.. */ - __core_scsi3_free_registration(cmd->se_lun->lun_se_dev, pr_reg, + __core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1); /* * From spc4r17, section 5.7.11.3 Unregistering @@ -2315,7 +2315,7 @@ static int core_scsi3_emulate_pro_register( * READ_KEYS service action. 
*/ pr_reg->pr_res_generation = core_scsi3_pr_generation( - cmd->se_lun->lun_se_dev); + cmd->se_dev); pr_reg->pr_res_key = sa_res_key; printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" " Key for %s to: 0x%016Lx PRgeneration:" @@ -2398,7 +2398,7 @@ static int core_scsi3_pro_reserve( /* * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -2527,7 +2527,7 @@ static int core_scsi3_pro_reserve( spin_unlock(&dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -2758,7 +2758,7 @@ static int core_scsi3_emulate_pro_release( write_aptpl: if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -2783,7 +2783,7 @@ static int core_scsi3_emulate_pro_clear( /* * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg_n)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -2849,7 +2849,7 @@ static int core_scsi3_emulate_pro_clear( cmd->se_tfo->get_fabric_name()); if (pr_tmpl->pr_aptpl_active) { - core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" " for CLEAR\n"); } @@ -2954,7 +2954,7 @@ static int core_scsi3_pro_preempt( u64 sa_res_key, int abort) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve; struct se_node_acl *pr_reg_nacl; struct se_session *se_sess = cmd->se_sess; @@ -2969,7 +2969,7 @@ static int core_scsi3_pro_preempt( return PYX_TRANSPORT_LU_COMM_FAILURE; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; - pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, + pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg_n)) { printk(KERN_ERR "SPC-3 PR: Unable to locate" @@ -3111,7 +3111,7 @@ static int core_scsi3_pro_preempt( spin_unlock(&dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3121,7 +3121,7 @@ static int core_scsi3_pro_preempt( } core_scsi3_put_pr_reg(pr_reg_n); - core_scsi3_pr_generation(cmd->se_lun->lun_se_dev); + core_scsi3_pr_generation(cmd->se_dev); return 0; } /* @@ -3247,7 +3247,7 @@ static int core_scsi3_pro_preempt( } if (pr_tmpl->pr_aptpl_active) { - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3256,7 +3256,7 @@ static int core_scsi3_pro_preempt( } core_scsi3_put_pr_reg(pr_reg_n); - core_scsi3_pr_generation(cmd->se_lun->lun_se_dev); + core_scsi3_pr_generation(cmd->se_dev); return 0; } @@ -3298,7 
+3298,7 @@ static int core_scsi3_emulate_pro_register_and_move( int unreg) { struct se_session *se_sess = cmd->se_sess; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *se_deve, *dest_se_deve = NULL; struct se_lun *se_lun = cmd->se_lun; struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL; @@ -3307,7 +3307,7 @@ static int core_scsi3_emulate_pro_register_and_move( struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; unsigned char *initiator_str; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tid_len, tmp_tid_len; @@ -3330,7 +3330,7 @@ static int core_scsi3_emulate_pro_register_and_move( * * Locate the existing *pr_reg via struct se_node_acl pointers */ - pr_reg = core_scsi3_locate_pr_reg(cmd->se_lun->lun_se_dev, se_sess->se_node_acl, + pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if (!(pr_reg)) { printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" @@ -3612,7 +3612,7 @@ after_iport_check: dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, iport_ptr); if (!(dest_pr_reg)) { - ret = core_scsi3_alloc_registration(cmd->se_lun->lun_se_dev, + ret = core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, 0, aptpl, 2, 1); if (ret != 0) { @@ -3683,12 +3683,12 @@ after_iport_check: */ if (!(aptpl)) { pr_tmpl->pr_aptpl_active = 0; - core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, NULL, 0); + core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); printk("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER_AND_MOVE\n"); } else { pr_tmpl->pr_aptpl_active = 1; - ret = core_scsi3_update_and_write_aptpl(cmd->se_lun->lun_se_dev, + ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &dest_pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); if (!(ret)) @@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) */ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) { - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u64 res_key, sa_res_key; int sa, scope, type, aptpl; int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; @@ -3827,10 +3827,10 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) */ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) { - struct se_device *se_dev = cmd->se_lun->lun_se_dev; + struct se_device *se_dev = cmd->se_dev; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u32 add_len = 0, off = 8; if (cmd->data_length < 8) { @@ -3882,10 +3882,10 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) */ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) { - struct se_device *se_dev = cmd->se_lun->lun_se_dev; + struct se_device *se_dev = cmd->se_dev; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u64 pr_res_key; u32 
add_len = 16; /* Hardcoded to 16 when a reservation is held. */ @@ -3963,9 +3963,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) */ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u16 add_len = 8; /* Hardcoded to 8. */ if (cmd->data_length < 6) { @@ -4014,13 +4014,13 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) */ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) { - struct se_device *se_dev = cmd->se_lun->lun_se_dev; + struct se_device *se_dev = cmd->se_dev; struct se_node_acl *se_nacl; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_tmp; struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task->t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; u32 off = 8; /* off into first Full Status descriptor */ int format_code = 0; @@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) int core_scsi3_emulate_pr(struct se_cmd *cmd) { - unsigned char *cdb = &cmd->t_task->t_task_cdb[0]; + unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; struct se_device *dev = cmd->se_dev; /* * Following spc2r20 5.5.1 Reservations overview: diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 44a79a5c6d32..ecfe889cb0ce 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -72,7 +72,7 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) phv->phv_host_id = host_id; phv->phv_mode = PHV_VIRUTAL_HOST_ID; - hba->hba_ptr = (void *)phv; + hba->hba_ptr = phv; printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, @@ -355,7 +355,7 @@ static struct se_device *pscsi_add_device_to_list( pdv->pdv_sd = sd; dev = transport_add_device_to_core_hba(hba, &pscsi_template, - se_dev, dev_flags, (void *)pdv, + se_dev, dev_flags, pdv, &dev_limits, NULL, NULL); if (!(dev)) { pdv->pdv_sd = NULL; @@ -394,7 +394,7 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) pdv->pdv_se_hba = hba; printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); - return (void *)pdv; + return pdv; } /* @@ -697,7 +697,7 @@ static int pscsi_transport_complete(struct se_task *task) if (task->task_se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = task->task_se_cmd->t_task->t_task_buf; + unsigned char *buf = task->task_se_cmd->t_task.t_task_buf; if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -763,7 +763,7 @@ static struct se_task * pscsi_alloc_task(struct se_cmd *cmd) { struct pscsi_plugin_task *pt; - unsigned char *cdb = cmd->t_task->t_task_cdb; + unsigned char *cdb = cmd->t_task.t_task_cdb; pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); if (!pt) { @@ -776,7 +776,7 @@ pscsi_alloc_task(struct se_cmd *cmd) * allocate the extended CDB buffer for per struct se_task context * pt->pscsi_cdb now. 
*/ - if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) { + if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) { pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); if (!(pt->pscsi_cdb)) { @@ -812,7 +812,7 @@ static inline void pscsi_blk_init_request( * also set the end_io_data pointer.to struct se_task. */ req->end_io = pscsi_req_done; - req->end_io_data = (void *)task; + req->end_io_data = task; /* * Load the referenced struct se_task's SCSI CDB into * include/linux/blkdev.h:struct request->cmd @@ -822,7 +822,7 @@ static inline void pscsi_blk_init_request( /* * Setup pointer for outgoing sense data. */ - req->sense = (void *)&pt->pscsi_sense[0]; + req->sense = &pt->pscsi_sense[0]; req->sense_len = 0; } @@ -889,7 +889,7 @@ static void pscsi_free_task(struct se_task *task) * Release the extended CDB allocation from pscsi_alloc_task() * if one exists. */ - if (cmd->t_task->t_task_cdb != cmd->t_task->__t_task_cdb) + if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) kfree(pt->pscsi_cdb); /* * We do not release the bio(s) here associated with this task, as @@ -1266,7 +1266,7 @@ static int pscsi_map_task_non_SG(struct se_task *task) return 0; ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, - pt->pscsi_req, cmd->t_task->t_task_buf, + pt->pscsi_req, cmd->t_task.t_task_buf, task->task_size, GFP_KERNEL); if (ret < 0) { printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index fbf06c3994fd..384a8e2083e3 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -66,7 +66,7 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) rd_host->rd_host_id = host_id; - hba->hba_ptr = (void *) rd_host; + hba->hba_ptr = rd_host; printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, @@ -271,7 +271,7 @@ static struct se_device *rd_create_virtdevice( dev = transport_add_device_to_core_hba(hba, (rd_dev->rd_direct) ? &rd_dr_template : - &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev, + &rd_mcp_template, se_dev, dev_flags, rd_dev, &dev_limits, prod, rev); if (!(dev)) goto fail; @@ -336,7 +336,7 @@ rd_alloc_task(struct se_cmd *cmd) printk(KERN_ERR "Unable to allocate struct rd_request\n"); return NULL; } - rd_req->rd_dev = cmd->se_lun->lun_se_dev->dev_ptr; + rd_req->rd_dev = cmd->se_dev->dev_ptr; return &rd_req->rd_task; } @@ -737,7 +737,7 @@ check_eot: } out: - task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; #ifdef DEBUG_RAMDISK_DR printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", *se_mem_cnt); @@ -819,7 +819,7 @@ static int rd_DIRECT_without_offset( } out: - task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; #ifdef DEBUG_RAMDISK_DR printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", *se_mem_cnt); @@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map( * across multiple struct se_task->task_sg[]. 
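Just below, rd_DIRECT_do_se_mem_map() trades the open-coded list_entry(head->next, ...) for list_first_entry(), which expands to exactly that expression but reads as what it means. A self-contained illustration; ex_item is a stand-in type:

#include <linux/list.h>

struct ex_item {
	int value;
	struct list_head se_list;
};

/*
 * list_first_entry(head, type, member) is defined as
 * list_entry((head)->next, type, member), so the two lookups below
 * always name the same element; the new spelling just states intent.
 */
static int first_value(struct list_head *head)
{
	struct ex_item *a = list_entry(head->next, struct ex_item, se_list);
	struct ex_item *b = list_first_entry(head, struct ex_item, se_list);

	return (a == b) ? b->value : -1;	/* first branch is always taken */
}
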
*/ ret = transport_init_task_sg(task, - list_entry(cmd->t_task->t_mem_list->next, + list_first_entry(&cmd->t_task.t_mem_list, struct se_mem, se_list), task_offset); if (ret <= 0) return ret; return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - list_entry(cmd->t_task->t_mem_list->next, + list_first_entry(&cmd->t_task.t_mem_list, struct se_mem, se_list), out_se_mem, se_mem_cnt, task_offset_in); } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 2f73749b8151..e1f99f75ac35 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -113,15 +113,14 @@ int core_tmr_lun_reset( struct list_head *preempt_and_abort_list, struct se_cmd *prout_cmd) { - struct se_cmd *cmd; - struct se_queue_req *qr, *qr_tmp; + struct se_cmd *cmd, *tcmd; struct se_node_acl *tmr_nacl = NULL; struct se_portal_group *tmr_tpg = NULL; struct se_queue_obj *qobj = &dev->dev_queue_obj; struct se_tmr_req *tmr_p, *tmr_pp; struct se_task *task, *task_tmp; unsigned long flags; - int fe_count, state, tas; + int fe_count, tas; /* * TASK_ABORTED status bit, this is configurable via ConfigFS * struct se_device attributes. spc4r17 section 7.4.6 Control mode page @@ -179,14 +178,14 @@ int core_tmr_lun_reset( continue; spin_unlock(&dev->se_tmr_lock); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->t_transport_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.t_transport_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); spin_lock(&dev->se_tmr_lock); continue; } if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); spin_lock(&dev->se_tmr_lock); continue; } @@ -194,7 +193,7 @@ int core_tmr_lun_reset( " Response: 0x%02x, t_state: %d\n", (preempt_and_abort_list) ? "Preempt" : "", tmr_p, tmr_p->function, tmr_p->response, cmd->t_state); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_cmd_finish_abort_tmr(cmd); spin_lock(&dev->se_tmr_lock); @@ -230,12 +229,6 @@ int core_tmr_lun_reset( } cmd = task->task_se_cmd; - if (!cmd->t_task) { - printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:" - " %p ITT: 0x%08x\n", task, cmd, - cmd->se_tfo->get_task_tag(cmd)); - continue; - } /* * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. @@ -254,38 +247,38 @@ int core_tmr_lun_reset( atomic_set(&task->task_state_active, 0); spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" "def_t_state: %d/%d cdb: 0x%02x\n", (preempt_and_abort_list) ? 
"Preempt" : "", cmd, task, cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, - cmd->deferred_t_state, cmd->t_task->t_task_cdb[0]); + cmd->deferred_t_state, cmd->t_task.t_task_cdb[0]); DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" " t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, - cmd->t_task->t_task_cdbs, - atomic_read(&cmd->t_task->t_task_cdbs_left), - atomic_read(&cmd->t_task->t_task_cdbs_sent), - atomic_read(&cmd->t_task->t_transport_active), - atomic_read(&cmd->t_task->t_transport_stop), - atomic_read(&cmd->t_task->t_transport_sent)); + cmd->t_task.t_task_cdbs, + atomic_read(&cmd->t_task.t_task_cdbs_left), + atomic_read(&cmd->t_task.t_task_cdbs_sent), + atomic_read(&cmd->t_task.t_transport_active), + atomic_read(&cmd->t_task.t_transport_stop), + atomic_read(&cmd->t_task.t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" " for dev: %p\n", task, dev); wait_for_completion(&task->task_stop_comp); DEBUG_LR("LUN_RESET Completed task: %p shutdown for" " dev: %p\n", task, dev); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - atomic_dec(&cmd->t_task->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + atomic_dec(&cmd->t_task.t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -295,24 +288,24 @@ int core_tmr_lun_reset( } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { + if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task.t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - fe_count = atomic_read(&cmd->t_task->t_fe_count); + fe_count = atomic_read(&cmd->t_task.t_fe_count); - if (atomic_read(&cmd->t_task->t_transport_active)) { + if (atomic_read(&cmd->t_task.t_transport_active)) { DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&cmd->t_task->t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + atomic_set(&cmd->t_task.t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); @@ -321,8 +314,8 @@ int core_tmr_lun_reset( } DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&cmd->t_task->t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + atomic_set(&cmd->t_task.t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -337,20 +330,7 @@ int core_tmr_lun_reset( * reference, otherwise the struct se_cmd is released. 
*/ spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) { - cmd = (struct se_cmd *)qr->cmd; - if (!(cmd)) { - /* - * Skip these for non PREEMPT_AND_ABORT usage.. - */ - if (preempt_and_abort_list != NULL) - continue; - - atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); - kfree(qr); - continue; - } + list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) { /* * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. @@ -365,18 +345,15 @@ int core_tmr_lun_reset( if (prout_cmd == cmd) continue; - atomic_dec(&cmd->t_task->t_transport_queue_active); + atomic_dec(&cmd->t_task.t_transport_queue_active); atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); + list_del(&cmd->se_queue_node); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - state = qr->state; - kfree(qr); - DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" " %d t_fe_count: %d\n", (preempt_and_abort_list) ? - "Preempt" : "", cmd, state, - atomic_read(&cmd->t_task->t_fe_count)); + "Preempt" : "", cmd, cmd->t_state, + atomic_read(&cmd->t_task.t_fe_count)); /* * Signal that the command has failed via cmd->se_cmd_flags, * and call TFO->new_cmd_failure() to wakeup any fabric @@ -388,7 +365,7 @@ int core_tmr_lun_reset( transport_new_cmd_failure(cmd); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, - atomic_read(&cmd->t_task->t_fe_count)); + atomic_read(&cmd->t_task.t_fe_count)); spin_lock_irqsave(&qobj->cmd_queue_lock, flags); } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 6f5d4dfa14e8..d0cd6016d3b5 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -201,7 +201,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; typedef int (*map_func_t)(struct se_task *, u32); static int transport_generic_write_pending(struct se_cmd *); -static int transport_processing_thread(void *); +static int transport_processing_thread(void *param); static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_direct_request_timeout(struct se_cmd *cmd); @@ -215,9 +215,8 @@ static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, static int transport_generic_remove(struct se_cmd *cmd, int release_to_pool, int session_reinstatement); static int transport_get_sectors(struct se_cmd *cmd); -static struct list_head *transport_init_se_mem_list(void); static int transport_map_sg_to_mem(struct se_cmd *cmd, - struct list_head *se_mem_list, void *in_mem, + struct list_head *se_mem_list, struct scatterlist *sgl, u32 *se_mem_cnt); static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, unsigned char *dst, struct list_head *se_mem_list); @@ -574,7 +573,7 @@ void transport_deregister_session(struct se_session *se_sess) EXPORT_SYMBOL(transport_deregister_session); /* - * Called with cmd->t_task->t_state_lock held. + * Called with cmd->t_task.t_state_lock held. 
*/ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) { @@ -582,10 +581,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - if (!cmd->t_task) - return; - - list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { dev = task->se_dev; if (!(dev)) continue; @@ -603,7 +599,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); atomic_set(&task->task_state_active, 0); - atomic_dec(&cmd->t_task->t_task_cdbs_ex_left); + atomic_dec(&cmd->t_task.t_task_cdbs_ex_left); } } @@ -622,32 +618,32 @@ static int transport_cmd_check_stop( { unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); /* * Determine if IOCTL context caller in requesting the stopping of this * command for LUN shutdown purposes. */ - if (atomic_read(&cmd->t_task->transport_lun_stop)) { - DEBUG_CS("%s:%d atomic_read(&cmd->t_task->transport_lun_stop)" + if (atomic_read(&cmd->t_task.transport_lun_stop)) { + DEBUG_CS("%s:%d atomic_read(&cmd->t_task.transport_lun_stop)" " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; - atomic_set(&cmd->t_task->t_transport_active, 0); + atomic_set(&cmd->t_task.t_transport_active, 0); if (transport_off == 2) transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); - complete(&cmd->t_task->transport_lun_stop_comp); + complete(&cmd->t_task.transport_lun_stop_comp); return 1; } /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. 
*/ - if (atomic_read(&cmd->t_task->t_transport_stop)) { - DEBUG_CS("%s:%d atomic_read(&cmd->t_task->t_transport_stop) ==" + if (atomic_read(&cmd->t_task.t_transport_stop)) { + DEBUG_CS("%s:%d atomic_read(&cmd->t_task.t_transport_stop) ==" " TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); @@ -662,13 +658,13 @@ static int transport_cmd_check_stop( */ if (transport_off == 2) cmd->se_lun = NULL; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); - complete(&cmd->t_task->t_transport_stop_comp); + complete(&cmd->t_task.t_transport_stop_comp); return 1; } if (transport_off) { - atomic_set(&cmd->t_task->t_transport_active, 0); + atomic_set(&cmd->t_task.t_transport_active, 0); if (transport_off == 2) { transport_all_task_dev_remove_state(cmd); /* @@ -683,18 +679,18 @@ static int transport_cmd_check_stop( */ if (cmd->se_tfo->check_stop_free != NULL) { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); cmd->se_tfo->check_stop_free(cmd); return 1; } } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } else if (t_state) cmd->t_state = t_state; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } @@ -712,21 +708,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) if (!lun) return; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); goto check_lun; } - atomic_set(&cmd->t_task->transport_dev_active, 0); + atomic_set(&cmd->t_task.transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); check_lun: spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (atomic_read(&cmd->t_task->transport_lun_active)) { - list_del(&cmd->se_lun_list); - atomic_set(&cmd->t_task->transport_lun_active, 0); + if (atomic_read(&cmd->t_task.transport_lun_active)) { + list_del(&cmd->se_lun_node); + atomic_set(&cmd->t_task.transport_lun_active, 0); #if 0 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); @@ -737,7 +733,7 @@ check_lun: void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { - transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) @@ -748,7 +744,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) { - transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); if (transport_cmd_check_stop_to_fabric(cmd)) return; @@ -756,50 +752,36 @@ void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) transport_generic_remove(cmd, 0, 0); } -static int transport_add_cmd_to_queue( +static void transport_add_cmd_to_queue( struct se_cmd *cmd, int t_state) { struct se_device *dev = cmd->se_dev; struct 
se_queue_obj *qobj = &dev->dev_queue_obj; - struct se_queue_req *qr; unsigned long flags; - qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC); - if (!(qr)) { - printk(KERN_ERR "Unable to allocate memory for" - " struct se_queue_req\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&qr->qr_list); - - qr->cmd = cmd; - qr->state = t_state; + INIT_LIST_HEAD(&cmd->se_queue_node); if (t_state) { - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); cmd->t_state = t_state; - atomic_set(&cmd->t_task->t_transport_active, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + atomic_set(&cmd->t_task.t_transport_active, 1); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_add_tail(&qr->qr_list, &qobj->qobj_list); - atomic_inc(&cmd->t_task->t_transport_queue_active); + list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); + atomic_inc(&cmd->t_task.t_transport_queue_active); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); atomic_inc(&qobj->queue_cnt); wake_up_interruptible(&qobj->thread_wq); - return 0; } -/* - * Called with struct se_queue_obj->cmd_queue_lock held. - */ -static struct se_queue_req * -transport_get_qr_from_queue(struct se_queue_obj *qobj) +static struct se_cmd * +transport_get_cmd_from_queue(struct se_queue_obj *qobj) { - struct se_queue_req *qr; + struct se_cmd *cmd; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -807,47 +789,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj) spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return NULL; } + cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); - list_for_each_entry(qr, &qobj->qobj_list, qr_list) - break; + atomic_dec(&cmd->t_task.t_transport_queue_active); - if (qr->cmd) - atomic_dec(&qr->cmd->t_task->t_transport_queue_active); - - list_del(&qr->qr_list); + list_del(&cmd->se_queue_node); atomic_dec(&qobj->queue_cnt); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - return qr; + return cmd; } static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj) { - struct se_queue_req *qr = NULL, *qr_p = NULL; + struct se_cmd *t; unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (!(atomic_read(&cmd->t_task->t_transport_queue_active))) { + if (!(atomic_read(&cmd->t_task.t_transport_queue_active))) { spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } - list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) { - if (qr->cmd != cmd) - continue; - - atomic_dec(&qr->cmd->t_task->t_transport_queue_active); - atomic_dec(&qobj->queue_cnt); - list_del(&qr->qr_list); - kfree(qr); - } + list_for_each_entry(t, &qobj->qobj_list, se_queue_node) + if (t == cmd) { + atomic_dec(&cmd->t_task.t_transport_queue_active); + atomic_dec(&qobj->queue_cnt); + list_del(&cmd->se_queue_node); + break; + } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - if (atomic_read(&cmd->t_task->t_transport_queue_active)) { + if (atomic_read(&cmd->t_task.t_transport_queue_active)) { printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", cmd->se_tfo->get_task_tag(cmd), - atomic_read(&cmd->t_task->t_transport_queue_active)); + atomic_read(&cmd->t_task.t_transport_queue_active)); } } @@ -857,7 +834,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, */ void transport_complete_sync_cache(struct se_cmd *cmd, int good) { - struct se_task *task = 
list_entry(cmd->t_task->t_task_list.next, + struct se_task *task = list_entry(cmd->t_task.t_task_list.next, struct se_task, t_list); if (good) { @@ -887,12 +864,12 @@ void transport_complete_task(struct se_task *task, int success) unsigned long flags; #if 0 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, - cmd->t_task->t_task_cdb[0], dev); + cmd->t_task.t_task_cdb[0], dev); #endif if (dev) atomic_inc(&dev->depth_left); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); atomic_set(&task->task_active, 0); /* @@ -914,14 +891,14 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_stop)) { /* - * Decrement cmd->t_task->t_se_count if this task had + * Decrement cmd->t_task.t_se_count if this task had * previously thrown its timeout exception handler. */ if (atomic_read(&task->task_timeout)) { - atomic_dec(&cmd->t_task->t_se_count); + atomic_dec(&cmd->t_task.t_se_count); atomic_set(&task->task_timeout, 0); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); complete(&task->task_stop_comp); return; @@ -933,33 +910,33 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_timeout)) { if (!(atomic_dec_and_test( - &cmd->t_task->t_task_cdbs_timeout_left))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + &cmd->t_task.t_task_cdbs_timeout_left))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } t_state = TRANSPORT_COMPLETE_TIMEOUT; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); return; } - atomic_dec(&cmd->t_task->t_task_cdbs_timeout_left); + atomic_dec(&cmd->t_task.t_task_cdbs_timeout_left); /* * Decrement the outstanding t_task_cdbs_left count. The last * struct se_task from struct se_cmd will complete itself into the * device queue depending upon int success. 
*/ - if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) { + if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) { if (!success) - cmd->t_task->t_tasks_failed = 1; + cmd->t_task.t_tasks_failed = 1; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } - if (!success || cmd->t_task->t_tasks_failed) { + if (!success || cmd->t_task.t_tasks_failed) { t_state = TRANSPORT_COMPLETE_FAILURE; if (!task->task_error_status) { task->task_error_status = @@ -968,10 +945,10 @@ void transport_complete_task(struct se_task *task, int success) PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } } else { - atomic_set(&cmd->t_task->t_transport_complete, 1); + atomic_set(&cmd->t_task.t_transport_complete, 1); t_state = TRANSPORT_COMPLETE_OK; } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); } @@ -1064,8 +1041,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { dev = task->se_dev; if (atomic_read(&task->task_state_active)) @@ -1081,17 +1058,17 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) spin_unlock(&dev->execute_task_lock); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } static void transport_add_tasks_from_cmd(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_task *task, *task_prev = NULL; unsigned long flags; spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { if (atomic_read(&task->task_execute_queue)) continue; /* @@ -1184,19 +1161,15 @@ void transport_dump_dev_state( */ static void transport_release_all_cmds(struct se_device *dev) { - struct se_cmd *cmd = NULL; - struct se_queue_req *qr = NULL, *qr_p = NULL; + struct se_cmd *cmd, *tcmd; int bug_out = 0, t_state; unsigned long flags; spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); - list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj.qobj_list, - qr_list) { - - cmd = qr->cmd; - t_state = qr->state; - list_del(&qr->qr_list); - kfree(qr); + list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, + se_queue_node) { + t_state = cmd->t_state; + list_del(&cmd->se_queue_node); spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); @@ -1548,7 +1521,7 @@ struct se_device *transport_add_device_to_core_hba( transport_init_queue_obj(&dev->dev_queue_obj); dev->dev_flags = device_flags; dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED; - dev->dev_ptr = (void *) transport_dev; + dev->dev_ptr = transport_dev; dev->se_hba = hba; dev->se_sub_dev = se_dev; dev->transport = transport; @@ -1684,7 +1657,7 @@ transport_generic_get_task(struct se_cmd *cmd, enum dma_data_direction data_direction) { struct se_task *task; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; unsigned long flags; task = dev->transport->alloc_task(cmd); @@ -1697,26 +1670,20 @@ transport_generic_get_task(struct se_cmd *cmd, 
INIT_LIST_HEAD(&task->t_execute_list); INIT_LIST_HEAD(&task->t_state_list); init_completion(&task->task_stop_comp); - task->task_no = cmd->t_task->t_tasks_no++; + task->task_no = cmd->t_task.t_tasks_no++; task->task_se_cmd = cmd; task->se_dev = dev; task->task_data_direction = data_direction; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task->t_task_list); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task.t_task_list); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return task; } static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *); -void transport_device_setup_cmd(struct se_cmd *cmd) -{ - cmd->se_dev = cmd->se_lun->lun_se_dev; -} -EXPORT_SYMBOL(transport_device_setup_cmd); - /* * Used by fabric modules containing a local struct se_cmd within their * fabric dependent per I/O descriptor. @@ -1730,20 +1697,18 @@ void transport_init_se_cmd( int task_attr, unsigned char *sense_buffer) { - INIT_LIST_HEAD(&cmd->se_lun_list); - INIT_LIST_HEAD(&cmd->se_delayed_list); - INIT_LIST_HEAD(&cmd->se_ordered_list); - /* - * Setup t_task pointer to t_task_backstore - */ - cmd->t_task = &cmd->t_task_backstore; + INIT_LIST_HEAD(&cmd->se_lun_node); + INIT_LIST_HEAD(&cmd->se_delayed_node); + INIT_LIST_HEAD(&cmd->se_ordered_node); - INIT_LIST_HEAD(&cmd->t_task->t_task_list); - init_completion(&cmd->t_task->transport_lun_fe_stop_comp); - init_completion(&cmd->t_task->transport_lun_stop_comp); - init_completion(&cmd->t_task->t_transport_stop_comp); - spin_lock_init(&cmd->t_task->t_state_lock); - atomic_set(&cmd->t_task->transport_dev_active, 1); + INIT_LIST_HEAD(&cmd->t_task.t_mem_list); + INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list); + INIT_LIST_HEAD(&cmd->t_task.t_task_list); + init_completion(&cmd->t_task.transport_lun_fe_stop_comp); + init_completion(&cmd->t_task.transport_lun_stop_comp); + init_completion(&cmd->t_task.t_transport_stop_comp); + spin_lock_init(&cmd->t_task.t_state_lock); + atomic_set(&cmd->t_task.transport_dev_active, 1); cmd->se_tfo = tfo; cmd->se_sess = se_sess; @@ -1760,7 +1725,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object */ - if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) return 0; if (cmd->sam_task_attr == MSG_ACA_TAG) { @@ -1772,7 +1737,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) * Used to determine when ORDERED commands should go from * Dormant to Active status. 
*/ - cmd->se_ordered_id = atomic_inc_return(&cmd->se_lun->lun_se_dev->dev_ordered_id); + cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); smp_mb__after_atomic_inc(); DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, @@ -1788,8 +1753,8 @@ void transport_free_se_cmd( /* * Check and free any extended CDB buffer that was allocated */ - if (se_cmd->t_task->t_task_cdb != se_cmd->t_task->__t_task_cdb) - kfree(se_cmd->t_task->t_task_cdb); + if (se_cmd->t_task.t_task_cdb != se_cmd->t_task.__t_task_cdb) + kfree(se_cmd->t_task.t_task_cdb); } EXPORT_SYMBOL(transport_free_se_cmd); @@ -1812,7 +1777,6 @@ int transport_generic_allocate_tasks( */ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_device_setup_cmd(cmd); /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD @@ -1828,26 +1792,26 @@ int transport_generic_allocate_tasks( * allocate the additional extended CDB buffer now.. Otherwise * setup the pointer from __t_task_cdb to t_task_cdb. */ - if (scsi_command_size(cdb) > sizeof(cmd->t_task->__t_task_cdb)) { - cmd->t_task->t_task_cdb = kzalloc(scsi_command_size(cdb), + if (scsi_command_size(cdb) > sizeof(cmd->t_task.__t_task_cdb)) { + cmd->t_task.t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); - if (!(cmd->t_task->t_task_cdb)) { - printk(KERN_ERR "Unable to allocate cmd->t_task->t_task_cdb" - " %u > sizeof(cmd->t_task->__t_task_cdb): %lu ops\n", + if (!(cmd->t_task.t_task_cdb)) { + printk(KERN_ERR "Unable to allocate cmd->t_task.t_task_cdb" + " %u > sizeof(cmd->t_task.__t_task_cdb): %lu ops\n", scsi_command_size(cdb), - (unsigned long)sizeof(cmd->t_task->__t_task_cdb)); + (unsigned long)sizeof(cmd->t_task.__t_task_cdb)); return -ENOMEM; } } else - cmd->t_task->t_task_cdb = &cmd->t_task->__t_task_cdb[0]; + cmd->t_task.t_task_cdb = &cmd->t_task.__t_task_cdb[0]; /* * Copy the original CDB into cmd->t_task. */ - memcpy(cmd->t_task->t_task_cdb, cdb, scsi_command_size(cdb)); + memcpy(cmd->t_task.t_task_cdb, cdb, scsi_command_size(cdb)); /* * Setup the received CDB based on SCSI defined opcodes and * perform unit attention, persistent reservations and ALUA - * checks for virtual device backends. The cmd->t_task->t_task_cdb + * checks for virtual device backends. The cmd->t_task.t_task_cdb * pointer is expected to be setup before we reach this point. */ ret = transport_generic_cmd_sequencer(cmd, cdb); @@ -1859,7 +1823,7 @@ int transport_generic_allocate_tasks( if (transport_check_alloc_task_attr(cmd) < 0) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -2; + return -EINVAL; } spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) @@ -1947,7 +1911,6 @@ int transport_generic_handle_tmr( * This is needed for early exceptions. 
*/ cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - transport_device_setup_cmd(cmd); transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); return 0; @@ -1973,9 +1936,9 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) /* * No tasks remain in the execution queue */ - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task->t_task_list, t_list) { + &cmd->t_task.t_task_list, t_list) { DEBUG_TS("task_no[%d] - Processing task %p\n", task->task_no, task); /* @@ -1984,14 +1947,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (!atomic_read(&task->task_sent) && !atomic_read(&task->task_active)) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_remove_task_from_execute_queue(task, task->se_dev); DEBUG_TS("task_no[%d] - Removed from execute queue\n", task->task_no); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); continue; } @@ -2001,7 +1964,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); DEBUG_TS("task_no[%d] - Waiting to complete\n", @@ -2010,8 +1973,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) DEBUG_TS("task_no[%d] - Stopped successfully\n", task->task_no); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - atomic_dec(&cmd->t_task->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + atomic_dec(&cmd->t_task.t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -2022,7 +1985,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) __transport_stop_task_timer(task, &flags); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return ret; } @@ -2038,7 +2001,7 @@ static void transport_generic_request_failure( { DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), - cmd->t_task->t_task_cdb[0]); + cmd->t_task.t_task_cdb[0]); DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" " %d/%d transport_error_status: %d\n", cmd->se_tfo->get_cmd_state(cmd), @@ -2047,13 +2010,13 @@ static void transport_generic_request_failure( DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" " t_transport_active: %d t_transport_stop: %d" - " t_transport_sent: %d\n", cmd->t_task->t_task_cdbs, - atomic_read(&cmd->t_task->t_task_cdbs_left), - atomic_read(&cmd->t_task->t_task_cdbs_sent), - atomic_read(&cmd->t_task->t_task_cdbs_ex_left), - atomic_read(&cmd->t_task->t_transport_active), - atomic_read(&cmd->t_task->t_transport_stop), - atomic_read(&cmd->t_task->t_transport_sent)); + " t_transport_sent: %d\n", cmd->t_task.t_task_cdbs, + atomic_read(&cmd->t_task.t_task_cdbs_left), + atomic_read(&cmd->t_task.t_task_cdbs_sent), + atomic_read(&cmd->t_task.t_task_cdbs_ex_left), + atomic_read(&cmd->t_task.t_transport_active), + atomic_read(&cmd->t_task.t_transport_stop), + atomic_read(&cmd->t_task.t_transport_sent)); transport_stop_all_task_timers(cmd); @@ -2135,7 +2098,7 @@ static void transport_generic_request_failure( break; default: printk(KERN_ERR 
"Unknown transport error for CDB 0x%02x: %d\n", - cmd->t_task->t_task_cdb[0], + cmd->t_task.t_task_cdb[0], cmd->transport_error_status); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; @@ -2156,19 +2119,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->t_transport_timeout))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.t_transport_timeout))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } - if (atomic_read(&cmd->t_task->t_task_cdbs_timeout_left)) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + if (atomic_read(&cmd->t_task.t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } - atomic_sub(atomic_read(&cmd->t_task->t_transport_timeout), - &cmd->t_task->t_se_count); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + atomic_sub(atomic_read(&cmd->t_task.t_transport_timeout), + &cmd->t_task.t_se_count); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } static void transport_generic_request_timeout(struct se_cmd *cmd) @@ -2176,16 +2139,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) unsigned long flags; /* - * Reset cmd->t_task->t_se_count to allow transport_generic_remove() + * Reset cmd->t_task.t_se_count to allow transport_generic_remove() * to allow last call to free memory resources. */ - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (atomic_read(&cmd->t_task->t_transport_timeout) > 1) { - int tmp = (atomic_read(&cmd->t_task->t_transport_timeout) - 1); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (atomic_read(&cmd->t_task.t_transport_timeout) > 1) { + int tmp = (atomic_read(&cmd->t_task.t_transport_timeout) - 1); - atomic_sub(tmp, &cmd->t_task->t_se_count); + atomic_sub(tmp, &cmd->t_task.t_se_count); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_generic_remove(cmd, 0, 0); } @@ -2201,8 +2164,8 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) return -ENOMEM; } - cmd->t_task->t_tasks_se_num = 0; - cmd->t_task->t_task_buf = buf; + cmd->t_task.t_tasks_se_num = 0; + cmd->t_task.t_task_buf = buf; return 0; } @@ -2244,9 +2207,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) { unsigned long flags; - spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags); } /* @@ -2260,9 +2223,9 @@ static void transport_task_timeout_handler(unsigned long data) DEBUG_TT("transport task timeout fired! 
task: %p cmd: %p\n", task, cmd); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); if (task->task_flags & TF_STOP) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } task->task_flags &= ~TF_RUNNING; @@ -2273,13 +2236,13 @@ static void transport_task_timeout_handler(unsigned long data) if (!(atomic_read(&task->task_active))) { DEBUG_TT("transport task: %p cmd: %p timeout task_active" " == 0\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } - atomic_inc(&cmd->t_task->t_se_count); - atomic_inc(&cmd->t_task->t_transport_timeout); - cmd->t_task->t_tasks_failed = 1; + atomic_inc(&cmd->t_task.t_se_count); + atomic_inc(&cmd->t_task.t_transport_timeout); + cmd->t_task.t_tasks_failed = 1; atomic_set(&task->task_timeout, 1); task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; @@ -2288,28 +2251,28 @@ static void transport_task_timeout_handler(unsigned long data) if (atomic_read(&task->task_stop)) { DEBUG_TT("transport task: %p cmd: %p timeout task_stop" " == 1\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); complete(&task->task_stop_comp); return; } - if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_left))) { + if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) { DEBUG_TT("transport task: %p cmd: %p timeout non zero" " t_task_cdbs_left\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return; } DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", task, cmd); cmd->t_state = TRANSPORT_COMPLETE_FAILURE; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); } /* - * Called with cmd->t_task->t_state_lock held. + * Called with cmd->t_task.t_state_lock held. */ static void transport_start_task_timer(struct se_task *task) { @@ -2339,7 +2302,7 @@ static void transport_start_task_timer(struct se_task *task) } /* - * Called with spin_lock_irq(&cmd->t_task->t_state_lock) held. + * Called with spin_lock_irq(&cmd->t_task.t_state_lock) held. 
*/ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) { @@ -2349,11 +2312,11 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) return; task->task_flags |= TF_STOP; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, *flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, *flags); del_timer_sync(&task->task_timer); - spin_lock_irqsave(&cmd->t_task->t_state_lock, *flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, *flags); task->task_flags &= ~TF_RUNNING; task->task_flags &= ~TF_STOP; } @@ -2363,11 +2326,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd) struct se_task *task = NULL, *task_tmp; unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task->t_task_list, t_list) + &cmd->t_task.t_task_list, t_list) __transport_stop_task_timer(task, &flags); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } static inline int transport_tcq_window_closed(struct se_device *dev) @@ -2391,14 +2354,14 @@ static inline int transport_tcq_window_closed(struct se_device *dev) */ static inline int transport_execute_task_attr(struct se_cmd *cmd) { - if (cmd->se_lun->lun_se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) return 1; /* * Check for the existence of HEAD_OF_QUEUE, and if true return 1 * to allow the passed struct se_cmd list of tasks to the front of the list. */ if (cmd->sam_task_attr == MSG_HEAD_TAG) { - atomic_inc(&cmd->se_lun->lun_se_dev->dev_hoq_count); + atomic_inc(&cmd->se_dev->dev_hoq_count); smp_mb__after_atomic_inc(); DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" " 0x%02x, se_ordered_id: %u\n", @@ -2406,30 +2369,30 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) cmd->se_ordered_id); return 1; } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { - spin_lock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock); - list_add_tail(&cmd->se_ordered_list, - &cmd->se_lun->lun_se_dev->ordered_cmd_list); - spin_unlock(&cmd->se_lun->lun_se_dev->ordered_cmd_lock); + spin_lock(&cmd->se_dev->ordered_cmd_lock); + list_add_tail(&cmd->se_ordered_node, + &cmd->se_dev->ordered_cmd_list); + spin_unlock(&cmd->se_dev->ordered_cmd_lock); - atomic_inc(&cmd->se_lun->lun_se_dev->dev_ordered_sync); + atomic_inc(&cmd->se_dev->dev_ordered_sync); smp_mb__after_atomic_inc(); DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" " list, se_ordered_id: %u\n", - cmd->t_task->t_task_cdb[0], + cmd->t_task.t_task_cdb[0], cmd->se_ordered_id); /* * Add ORDERED command to tail of execution queue if * no other older commands exist that need to be * completed first. */ - if (!(atomic_read(&cmd->se_lun->lun_se_dev->simple_cmds))) + if (!(atomic_read(&cmd->se_dev->simple_cmds))) return 1; } else { /* * For SIMPLE and UNTAGGED Task Attribute commands */ - atomic_inc(&cmd->se_lun->lun_se_dev->simple_cmds); + atomic_inc(&cmd->se_dev->simple_cmds); smp_mb__after_atomic_inc(); } /* @@ -2437,20 +2400,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * add the dormant task(s) built for the passed struct se_cmd to the * execution queue and become in Active state for this struct se_device. 
*/ - if (atomic_read(&cmd->se_lun->lun_se_dev->dev_ordered_sync) != 0) { + if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) { /* * Otherwise, add cmd w/ tasks to delayed cmd queue that * will be drained upon completion of HEAD_OF_QUEUE task. */ - spin_lock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock); + spin_lock(&cmd->se_dev->delayed_cmd_lock); cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR; - list_add_tail(&cmd->se_delayed_list, - &cmd->se_lun->lun_se_dev->delayed_cmd_list); - spin_unlock(&cmd->se_lun->lun_se_dev->delayed_cmd_lock); + list_add_tail(&cmd->se_delayed_node, + &cmd->se_dev->delayed_cmd_list); + spin_unlock(&cmd->se_dev->delayed_cmd_lock); DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" " delayed CMD list, se_ordered_id: %u\n", - cmd->t_task->t_task_cdb[0], cmd->sam_task_attr, + cmd->t_task.t_task_cdb[0], cmd->sam_task_attr, cmd->se_ordered_id); /* * Return zero to let transport_execute_tasks() know @@ -2505,7 +2468,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) * storage object. */ execute_tasks: - __transport_execute_tasks(cmd->se_lun->lun_se_dev); + __transport_execute_tasks(cmd->se_dev); return 0; } @@ -2548,17 +2511,17 @@ check_depth: cmd = task->task_se_cmd; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); atomic_set(&task->task_active, 1); atomic_set(&task->task_sent, 1); - atomic_inc(&cmd->t_task->t_task_cdbs_sent); + atomic_inc(&cmd->t_task.t_task_cdbs_sent); - if (atomic_read(&cmd->t_task->t_task_cdbs_sent) == - cmd->t_task->t_task_cdbs) + if (atomic_read(&cmd->t_task.t_task_cdbs_sent) == + cmd->t_task.t_task_cdbs) atomic_set(&cmd->transport_sent, 1); transport_start_task_timer(task); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); /* * The struct se_cmd->transport_emulate_cdb() function pointer is used * to grab REPORT_LUNS and other CDBs we want to handle before they hit the @@ -2623,10 +2586,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) * Any unsolicited data will get dumped for failed command inside of * the fabric plugin */ - spin_lock_irqsave(&se_cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - spin_unlock_irqrestore(&se_cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags); se_cmd->se_tfo->new_cmd_failure(se_cmd); } @@ -2638,7 +2601,7 @@ static inline u32 transport_get_sectors_6( struct se_cmd *cmd, int *ret) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2666,7 +2629,7 @@ static inline u32 transport_get_sectors_10( struct se_cmd *cmd, int *ret) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2696,7 +2659,7 @@ static inline u32 transport_get_sectors_12( struct se_cmd *cmd, int *ret) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. @@ -2726,7 +2689,7 @@ static inline u32 transport_get_sectors_16( struct se_cmd *cmd, int *ret) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; /* * Assume TYPE_DISK for non struct se_device objects. 
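The transport_get_sectors_{6,10,12,16}() hunks above only change how the backing struct se_device is looked up (cmd->se_dev instead of cmd->se_lun->lun_se_dev); the CDB decoding those helpers perform is untouched. As a reference for that decoding, here is a minimal standalone C sketch, not part of this patch, of the big-endian extraction behind transport_lba_32() and transport_get_sectors_10() for a READ_10 CDB; the helpers be32_at()/be16_at() and read10_*() are illustrative names, not kernel APIs.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Open-coded equivalents of the kernel's get_unaligned_be32()/be16(). */
static uint32_t be32_at(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint16_t be16_at(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* READ_10 (opcode 0x28): LBA in bytes 2-5, transfer length in bytes 7-8. */
static uint32_t read10_lba(const uint8_t *cdb)
{
	return be32_at(&cdb[2]);
}

static uint16_t read10_sectors(const uint8_t *cdb)
{
	return be16_at(&cdb[7]);
}

int main(void)
{
	/* READ_10 for 8 sectors starting at LBA 0x12345678. */
	const uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0, 8, 0 };

	printf("lba=0x%" PRIx32 " sectors=%u\n",
	       read10_lba(cdb), (unsigned)read10_sectors(cdb));
	return 0;
}

In the kernel proper these reads go through get_unaligned_be32()/get_unaligned_be16() from asm/unaligned.h, which also cope with architectures that fault on unaligned loads.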
@@ -2768,7 +2731,7 @@ static inline u32 transport_get_size( unsigned char *cdb, struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; if (dev->transport->get_device_type(dev) == TYPE_TAPE) { if (cdb[1] & 1) { /* sectors */ @@ -2836,17 +2799,17 @@ static void transport_xor_callback(struct se_cmd *cmd) return; } /* - * Copy the scatterlist WRITE buffer located at cmd->t_task->t_mem_list + * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list * into the locally allocated *buf */ - transport_memcpy_se_mem_read_contig(cmd, buf, cmd->t_task->t_mem_list); + transport_memcpy_se_mem_read_contig(cmd, buf, &cmd->t_task.t_mem_list); /* * Now perform the XOR against the BIDI read memory located at - * cmd->t_task->t_mem_bidi_list + * cmd->t_task.t_mem_bidi_list */ offset = 0; - list_for_each_entry(se_mem, cmd->t_task->t_mem_bidi_list, se_list) { + list_for_each_entry(se_mem, &cmd->t_task.t_mem_bidi_list, se_list) { addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); if (!(addr)) goto out; @@ -2874,14 +2837,14 @@ static int transport_get_sense_data(struct se_cmd *cmd) WARN_ON(!cmd->se_lun); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } list_for_each_entry_safe(task, task_tmp, - &cmd->t_task->t_task_list, t_list) { + &cmd->t_task.t_task_list, t_list) { if (!task->task_sense) continue; @@ -2903,12 +2866,12 @@ static int transport_get_sense_data(struct se_cmd *cmd) cmd->se_tfo->get_task_tag(cmd), task->task_no); continue; } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); - memcpy((void *)&buffer[offset], (void *)sense_buffer, + memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); cmd->scsi_status = task->task_scsi_status; /* Automatically padded */ @@ -2921,7 +2884,7 @@ static int transport_get_sense_data(struct se_cmd *cmd) cmd->scsi_status); return 0; } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return -1; } @@ -2958,7 +2921,7 @@ transport_handle_reservation_conflict(struct se_cmd *cmd) core_scsi3_ua_allocate(cmd->se_sess->se_node_acl, cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - return -2; + return -EINVAL; } /* transport_generic_cmd_sequencer(): @@ -2975,7 +2938,7 @@ static int transport_generic_cmd_sequencer( struct se_cmd *cmd, unsigned char *cdb) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_subsystem_dev *su_dev = dev->se_sub_dev; int ret = 0, sector_ret = 0, passthrough; u32 sectors = 0, size = 0, pr_reg_type = 0; @@ -2989,7 +2952,7 @@ static int transport_generic_cmd_sequencer( &transport_nop_wait_for_tasks; cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION; - return -2; + return -EINVAL; } /* * Check status of Asymmetric Logical Unit Assignment port @@ -3011,7 +2974,7 @@ static int transport_generic_cmd_sequencer( transport_set_sense_codes(cmd, 0x04, alua_ascq); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY; - return -2; + return -EINVAL; } goto 
out_invalid_cdb_field; } @@ -3036,7 +2999,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - cmd->t_task->t_task_lba = transport_lba_21(cdb); + cmd->t_task.t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_10: @@ -3045,7 +3008,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task->t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_12: @@ -3054,7 +3017,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - cmd->t_task->t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_16: @@ -3063,7 +3026,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - cmd->t_task->t_task_lba = transport_lba_64(cdb); + cmd->t_task.t_task_lba = transport_lba_64(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_6: @@ -3072,7 +3035,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - cmd->t_task->t_task_lba = transport_lba_21(cdb); + cmd->t_task.t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_10: @@ -3081,8 +3044,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task->t_task_lba = transport_lba_32(cdb); - cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task.t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_12: @@ -3091,8 +3054,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - cmd->t_task->t_task_lba = transport_lba_32(cdb); - cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task.t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_16: @@ -3101,20 +3064,20 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - cmd->t_task->t_task_lba = transport_lba_64(cdb); - cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task.t_task_lba = transport_lba_64(cdb); + cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case XDWRITEREAD_10: if ((cmd->data_direction != DMA_TO_DEVICE) || - !(cmd->t_task->t_tasks_bidi)) + !(cmd->t_task.t_tasks_bidi)) goto out_invalid_cdb_field; sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task->t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags
|= SCF_SCSI_DATA_SG_IO_CDB; passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); @@ -3127,7 +3090,7 @@ static int transport_generic_cmd_sequencer( * Setup BIDI XOR callback to be run during transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_task->t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); break; case VARIABLE_LENGTH_CMD: service_action = get_unaligned_be16(&cdb[8]); @@ -3149,7 +3112,7 @@ static int transport_generic_cmd_sequencer( * XDWRITE_READ_32 logic. */ cmd->transport_split_cdb = &split_cdb_XX_32; - cmd->t_task->t_task_lba = transport_lba_64_ext(cdb); + cmd->t_task.t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; /* @@ -3163,14 +3126,14 @@ static int transport_generic_cmd_sequencer( * transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_task->t_tasks_fua = (cdb[10] & 0x8); + cmd->t_task.t_tasks_fua = (cdb[10] & 0x8); break; case WRITE_SAME_32: sectors = transport_get_sectors_32(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->t_task->t_task_lba = get_unaligned_be64(&cdb[12]); + cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[12]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; /* @@ -3299,7 +3262,7 @@ static int transport_generic_cmd_sequencer( * Do implict HEAD_OF_QUEUE processing for INQUIRY. * See spc4r17 section 5.3 */ - if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; break; @@ -3405,10 +3368,10 @@ static int transport_generic_cmd_sequencer( */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); - cmd->t_task->t_task_lba = transport_lba_32(cdb); + cmd->t_task.t_task_lba = transport_lba_32(cdb); } else { sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); - cmd->t_task->t_task_lba = transport_lba_64(cdb); + cmd->t_task.t_task_lba = transport_lba_64(cdb); } if (sector_ret) goto out_unsupported_cdb; @@ -3454,7 +3417,7 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); - cmd->t_task->t_task_lba = get_unaligned_be16(&cdb[2]); + cmd->t_task.t_task_lba = get_unaligned_be16(&cdb[2]); passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); /* @@ -3507,7 +3470,7 @@ static int transport_generic_cmd_sequencer( * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS * See spc4r17 section 5.3 */ - if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; break; @@ -3560,11 +3523,11 @@ static int transport_generic_cmd_sequencer( out_unsupported_cdb: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; - return -2; + return -EINVAL; out_invalid_cdb_field: cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; - return -2; + return -EINVAL; } static inline void transport_release_tasks(struct se_cmd *); @@ -3662,7 +3625,7 @@ static void transport_memcpy_se_mem_read_contig( */ static void transport_complete_task_attr(struct se_cmd *cmd) { - struct se_device *dev =
cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_cmd *cmd_p, *cmd_tmp; int new_active_tasks = 0; @@ -3682,7 +3645,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { spin_lock(&dev->ordered_cmd_lock); - list_del(&cmd->se_ordered_list); + list_del(&cmd->se_ordered_node); atomic_dec(&dev->dev_ordered_sync); smp_mb__after_atomic_dec(); spin_unlock(&dev->ordered_cmd_lock); @@ -3698,9 +3661,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd) */ spin_lock(&dev->delayed_cmd_lock); list_for_each_entry_safe(cmd_p, cmd_tmp, - &dev->delayed_cmd_list, se_delayed_list) { + &dev->delayed_cmd_list, se_delayed_node) { - list_del(&cmd_p->se_delayed_list); + list_del(&cmd_p->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); DEBUG_STA("Calling add_tasks() for" @@ -3733,7 +3696,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task * Attribute. */ - if (cmd->se_lun->lun_se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) + if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); /* * Check if we need to retrieve a sense buffer from @@ -3777,8 +3740,8 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) */ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) transport_memcpy_write_contig(cmd, - cmd->t_task->t_task_pt_sgl, - cmd->t_task->t_task_buf); + cmd->t_task.t_task_pt_sgl, + cmd->t_task.t_task_buf); cmd->se_tfo->queue_data_in(cmd); break; @@ -3792,7 +3755,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (cmd->t_task->t_mem_bidi_list != NULL) { + if (!list_empty(&cmd->t_task.t_mem_bidi_list)) { spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) { cmd->se_lun->lun_sep->sep_stats.tx_data_octets += @@ -3819,9 +3782,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) struct se_task *task, *task_tmp; unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task->t_task_list, t_list) { + &cmd->t_task.t_task_list, t_list) { if (atomic_read(&task->task_active)) continue; @@ -3830,15 +3793,15 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) list_del(&task->t_list); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); if (task->se_dev) task->se_dev->transport->free_task(task); else printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", task->task_no); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } static inline void transport_free_pages(struct se_cmd *cmd) @@ -3851,9 +3814,9 @@ static inline void transport_free_pages(struct se_cmd *cmd) if (cmd->se_dev->transport->do_se_mem_map) free_page = 0; - if (cmd->t_task->t_task_buf) { - kfree(cmd->t_task->t_task_buf); - cmd->t_task->t_task_buf = NULL; + if (cmd->t_task.t_task_buf) { + kfree(cmd->t_task.t_task_buf); + cmd->t_task.t_task_buf = NULL; return; } @@ -3863,11 +3826,8 @@ static inline void transport_free_pages(struct se_cmd *cmd) if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC) return; - if 
(!(cmd->t_task->t_tasks_se_num)) - return; - list_for_each_entry_safe(se_mem, se_mem_tmp, - cmd->t_task->t_mem_list, se_list) { + &cmd->t_task.t_mem_list, se_list) { /* * We only release call __free_page(struct se_mem->se_page) when * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, @@ -3878,27 +3838,21 @@ static inline void transport_free_pages(struct se_cmd *cmd) list_del(&se_mem->se_list); kmem_cache_free(se_mem_cache, se_mem); } + cmd->t_task.t_tasks_se_num = 0; - if (cmd->t_task->t_mem_bidi_list && cmd->t_task->t_tasks_se_bidi_num) { - list_for_each_entry_safe(se_mem, se_mem_tmp, - cmd->t_task->t_mem_bidi_list, se_list) { - /* - * We only release call __free_page(struct se_mem->se_page) when - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, - */ - if (free_page) - __free_page(se_mem->se_page); + list_for_each_entry_safe(se_mem, se_mem_tmp, + &cmd->t_task.t_mem_bidi_list, se_list) { + /* + * We only release call __free_page(struct se_mem->se_page) when + * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, + */ + if (free_page) + __free_page(se_mem->se_page); - list_del(&se_mem->se_list); - kmem_cache_free(se_mem_cache, se_mem); - } + list_del(&se_mem->se_list); + kmem_cache_free(se_mem_cache, se_mem); } - - kfree(cmd->t_task->t_mem_bidi_list); - cmd->t_task->t_mem_bidi_list = NULL; - kfree(cmd->t_task->t_mem_list); - cmd->t_task->t_mem_list = NULL; - cmd->t_task->t_tasks_se_num = 0; + cmd->t_task.t_tasks_se_bidi_num = 0; } static inline void transport_release_tasks(struct se_cmd *cmd) @@ -3910,23 +3864,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (atomic_read(&cmd->t_task->t_fe_count)) { - if (!(atomic_dec_and_test(&cmd->t_task->t_fe_count))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (atomic_read(&cmd->t_task.t_fe_count)) { + if (!(atomic_dec_and_test(&cmd->t_task.t_fe_count))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 1; } } - if (atomic_read(&cmd->t_task->t_se_count)) { - if (!(atomic_dec_and_test(&cmd->t_task->t_se_count))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + if (atomic_read(&cmd->t_task.t_se_count)) { + if (!(atomic_dec_and_test(&cmd->t_task.t_se_count))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 1; } } - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } @@ -3938,14 +3892,14 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) if (transport_dec_and_check(cmd)) return; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); goto free_pages; } - atomic_set(&cmd->t_task->transport_dev_active, 0); + atomic_set(&cmd->t_task.transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_release_tasks(cmd); free_pages: @@ -3961,33 +3915,30 @@ static int transport_generic_remove( { unsigned long flags; - if (!(cmd->t_task)) - goto release_cmd; - if (transport_dec_and_check(cmd)) { if (session_reinstatement) { - 
spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); } return 1; } - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (!(atomic_read(&cmd->t_task->transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (!(atomic_read(&cmd->t_task.transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); goto free_pages; } - atomic_set(&cmd->t_task->transport_dev_active, 0); + atomic_set(&cmd->t_task.transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_release_tasks(cmd); + free_pages: transport_free_pages(cmd); -release_cmd: if (release_to_pool) { transport_release_cmd_to_pool(cmd); } else { @@ -4011,35 +3962,19 @@ release_cmd: */ int transport_generic_map_mem_to_cmd( struct se_cmd *cmd, - struct scatterlist *mem, - u32 sg_mem_num, - struct scatterlist *mem_bidi_in, - u32 sg_mem_bidi_num) + struct scatterlist *sgl, + u32 sgl_count, + struct scatterlist *sgl_bidi, + u32 sgl_bidi_count) { - u32 se_mem_cnt_out = 0; + u32 mapped_sg_count = 0; int ret; - if (!(mem) || !(sg_mem_num)) + if (!sgl || !sgl_count) return 0; - /* - * Passed *mem will contain a list_head containing preformatted - * struct se_mem elements... - */ - if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) { - if ((mem_bidi_in) || (sg_mem_bidi_num)) { - printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported" - " with BIDI-COMMAND\n"); - return -ENOSYS; - } - cmd->t_task->t_mem_list = (struct list_head *)mem; - cmd->t_task->t_tasks_se_num = sg_mem_num; - cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC; - return 0; - } /* - * Otherwise, assume the caller is passing a struct scatterlist - * array from include/linux/scatterlist.h + * Convert sgls (sgl, sgl_bidi) to list of se_mems */ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { @@ -4048,41 +3983,29 @@ int transport_generic_map_mem_to_cmd( * processed into a TCM struct se_subsystem_dev, we do the mapping * from the passed physical memory to struct se_mem->se_page here. 
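 * An illustrative sketch of how a fabric caller feeds this function its
 * scatterlists under the new signature (the my_fabric_*() helpers are
 * hypothetical, invented only for this example):
 *
 *	struct scatterlist *sgl = my_fabric_sg_table(fabric_cmd);
 *	u32 sgl_count = my_fabric_sg_count(fabric_cmd);
 *	int ret;
 *
 *	ret = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
 *					       NULL, 0);
 *	if (ret < 0)
 *		return ret;	// -ENOMEM; core frees any partial se_mem list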
*/ - cmd->t_task->t_mem_list = transport_init_se_mem_list(); - if (!(cmd->t_task->t_mem_list)) - return -ENOMEM; - ret = transport_map_sg_to_mem(cmd, - cmd->t_task->t_mem_list, mem, &se_mem_cnt_out); + &cmd->t_task.t_mem_list, sgl, &mapped_sg_count); if (ret < 0) return -ENOMEM; - cmd->t_task->t_tasks_se_num = se_mem_cnt_out; + cmd->t_task.t_tasks_se_num = mapped_sg_count; /* * Setup BIDI READ list of struct se_mem elements */ - if ((mem_bidi_in) && (sg_mem_bidi_num)) { - cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(cmd->t_task->t_mem_bidi_list)) { - kfree(cmd->t_task->t_mem_list); - return -ENOMEM; - } - se_mem_cnt_out = 0; - + if (sgl_bidi && sgl_bidi_count) { + mapped_sg_count = 0; ret = transport_map_sg_to_mem(cmd, - cmd->t_task->t_mem_bidi_list, mem_bidi_in, - &se_mem_cnt_out); - if (ret < 0) { - kfree(cmd->t_task->t_mem_list); + &cmd->t_task.t_mem_bidi_list, sgl_bidi, + &mapped_sg_count); + if (ret < 0) return -ENOMEM; - } - cmd->t_task->t_tasks_se_bidi_num = se_mem_cnt_out; + cmd->t_task.t_tasks_se_bidi_num = mapped_sg_count; } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { - if (mem_bidi_in || sg_mem_bidi_num) { + if (sgl_bidi || sgl_bidi_count) { printk(KERN_ERR "BIDI-Commands not supported using " "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); return -ENOSYS; @@ -4097,7 +4020,8 @@ int transport_generic_map_mem_to_cmd( * struct scatterlist format. */ cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; - cmd->t_task->t_task_pt_sgl = mem; + cmd->t_task.t_task_pt_sgl = sgl; + /* don't need sgl count? We assume it contains cmd->data_length data */ } return 0; @@ -4112,21 +4036,21 @@ static inline long long transport_dev_end_lba(struct se_device *dev) static int transport_get_sectors(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; - cmd->t_task->t_tasks_sectors = + cmd->t_task.t_tasks_sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); - if (!(cmd->t_task->t_tasks_sectors)) - cmd->t_task->t_tasks_sectors = 1; + if (!(cmd->t_task.t_tasks_sectors)) + cmd->t_task.t_tasks_sectors = 1; if (dev->transport->get_device_type(dev) != TYPE_DISK) return 0; - if ((cmd->t_task->t_task_lba + cmd->t_task->t_tasks_sectors) > + if ((cmd->t_task.t_task_lba + cmd->t_task.t_tasks_sectors) > transport_dev_end_lba(dev)) { printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" " transport_dev_end_lba(): %llu\n", - cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, + cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors, transport_dev_end_lba(dev)); cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; @@ -4138,26 +4062,26 @@ static int transport_get_sectors(struct se_cmd *cmd) static int transport_new_cmd_obj(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; u32 task_cdbs = 0, rc; if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { task_cdbs++; - cmd->t_task->t_task_cdbs++; + cmd->t_task.t_task_cdbs++; } else { int set_counts = 1; /* * Setup any BIDI READ tasks and memory from - * cmd->t_task->t_mem_bidi_list so the READ struct se_tasks + * cmd->t_task.t_mem_bidi_list so the READ struct se_tasks * are queued first for the non pSCSI passthrough case. 
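 * With t_mem_bidi_list embedded in the command rather than kzalloc'd,
 * the "does this command carry a BIDI payload?" test changes shape; a
 * side-by-side sketch of the idiom (editor's illustration):
 *
 *	before:	if (cmd->t_task->t_mem_bidi_list != NULL)
 *	after:	if (!list_empty(&cmd->t_task.t_mem_bidi_list))
 *
 * list_empty() on an initialized-but-unused head is the canonical
 * replacement for the old NULL check.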
*/ - if ((cmd->t_task->t_mem_bidi_list != NULL) && + if (!list_empty(&cmd->t_task.t_mem_bidi_list) && (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { rc = transport_generic_get_cdb_count(cmd, - cmd->t_task->t_task_lba, - cmd->t_task->t_tasks_sectors, - DMA_FROM_DEVICE, cmd->t_task->t_mem_bidi_list, + cmd->t_task.t_task_lba, + cmd->t_task.t_tasks_sectors, + DMA_FROM_DEVICE, &cmd->t_task.t_mem_bidi_list, set_counts); if (!(rc)) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; @@ -4168,13 +4092,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) set_counts = 0; } /* - * Setup the tasks and memory from cmd->t_task->t_mem_list + * Setup the tasks and memory from cmd->t_task.t_mem_list * Note for BIDI transfers this will contain the WRITE payload */ task_cdbs = transport_generic_get_cdb_count(cmd, - cmd->t_task->t_task_lba, - cmd->t_task->t_tasks_sectors, - cmd->data_direction, cmd->t_task->t_mem_list, + cmd->t_task.t_task_lba, + cmd->t_task.t_tasks_sectors, + cmd->data_direction, &cmd->t_task.t_mem_list, set_counts); if (!(task_cdbs)) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; @@ -4182,63 +4106,34 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return PYX_TRANSPORT_LU_COMM_FAILURE; } - cmd->t_task->t_task_cdbs += task_cdbs; + cmd->t_task.t_task_cdbs += task_cdbs; #if 0 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, - cmd->t_task->t_task_lba, cmd->t_task->t_tasks_sectors, - cmd->t_task->t_task_cdbs); + cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors, + cmd->t_task.t_task_cdbs); #endif } - atomic_set(&cmd->t_task->t_task_cdbs_left, task_cdbs); - atomic_set(&cmd->t_task->t_task_cdbs_ex_left, task_cdbs); - atomic_set(&cmd->t_task->t_task_cdbs_timeout_left, task_cdbs); + atomic_set(&cmd->t_task.t_task_cdbs_left, task_cdbs); + atomic_set(&cmd->t_task.t_task_cdbs_ex_left, task_cdbs); + atomic_set(&cmd->t_task.t_task_cdbs_timeout_left, task_cdbs); return 0; } -static struct list_head *transport_init_se_mem_list(void) -{ - struct list_head *se_mem_list; - - se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL); - if (!(se_mem_list)) { - printk(KERN_ERR "Unable to allocate memory for se_mem_list\n"); - return NULL; - } - INIT_LIST_HEAD(se_mem_list); - - return se_mem_list; -} - static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) { unsigned char *buf; struct se_mem *se_mem; - cmd->t_task->t_mem_list = transport_init_se_mem_list(); - if (!(cmd->t_task->t_mem_list)) - return -ENOMEM; - /* * If the device uses memory mapping this is enough. 
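 * The allocation loop below zeroes each new page through a
 * kmap_atomic()/memset()/kunmap_atomic() sequence; round 3 of this
 * series (further down in this log, per the hch note on __GFP_ZERO)
 * pushes the zeroing into the allocator itself. A sketch of the
 * simpler form, editor's assumption about the eventual shape:
 *
 *	se_mem->se_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 *	if (!se_mem->se_page)
 *		goto out;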
*/ if (cmd->se_dev->transport->do_se_mem_map) return 0; - /* - * Setup BIDI-COMMAND READ list of struct se_mem elements - */ - if (cmd->t_task->t_tasks_bidi) { - cmd->t_task->t_mem_bidi_list = transport_init_se_mem_list(); - if (!(cmd->t_task->t_mem_bidi_list)) { - kfree(cmd->t_task->t_mem_list); - return -ENOMEM; - } - } - while (length) { se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); if (!(se_mem)) { @@ -4263,8 +4158,8 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) memset(buf, 0, se_mem->se_len); kunmap_atomic(buf, KM_IRQ0); - list_add_tail(&se_mem->se_list, cmd->t_task->t_mem_list); - cmd->t_task->t_tasks_se_num++; + list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list); + cmd->t_task.t_tasks_se_num++; DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" " Offset(%u)\n", se_mem->se_page, se_mem->se_len, @@ -4274,7 +4169,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) } DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", - cmd->t_task->t_tasks_se_num); + cmd->t_task.t_tasks_se_num); return 0; out: @@ -4290,7 +4185,7 @@ int transport_init_task_sg( u32 task_offset) { struct se_cmd *se_cmd = task->task_se_cmd; - struct se_device *se_dev = se_cmd->se_lun->lun_se_dev; + struct se_device *se_dev = se_cmd->se_dev; struct se_mem *se_mem = in_se_mem; struct target_core_fabric_ops *tfo = se_cmd->se_tfo; u32 sg_length, task_size = task->task_size, task_sg_num_padded; @@ -4306,7 +4201,7 @@ int transport_init_task_sg( sg_length = se_mem->se_len; if (!(list_is_last(&se_mem->se_list, - se_cmd->t_task->t_mem_list))) + &se_cmd->t_task.t_mem_list))) se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); } else { @@ -4326,7 +4221,7 @@ int transport_init_task_sg( sg_length = (se_mem->se_len - task_offset); if (!(list_is_last(&se_mem->se_list, - se_cmd->t_task->t_mem_list))) + &se_cmd->t_task.t_mem_list))) se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); } @@ -4367,7 +4262,7 @@ next: * Setup task->task_sg_bidi for SCSI READ payload for * TCM/pSCSI passthrough if present for BIDI-COMMAND */ - if ((se_cmd->t_task->t_mem_bidi_list != NULL) && + if (!list_empty(&se_cmd->t_task.t_mem_bidi_list) && (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { task->task_sg_bidi = kzalloc(task_sg_num_padded * sizeof(struct scatterlist), GFP_KERNEL); @@ -4458,21 +4353,26 @@ static inline int transport_set_tasks_sectors( max_sectors_set); } +/* + * Convert a sgl into a linked list of se_mems. + */ static int transport_map_sg_to_mem( struct se_cmd *cmd, struct list_head *se_mem_list, - void *in_mem, - u32 *se_mem_cnt) + struct scatterlist *sg, + u32 *sg_count) { struct se_mem *se_mem; - struct scatterlist *sg; - u32 sg_count = 1, cmd_size = cmd->data_length; + u32 cmd_size = cmd->data_length; - WARN_ON(!in_mem); - - sg = (struct scatterlist *)in_mem; + WARN_ON(!sg); while (cmd_size) { + /* + * NOTE: it is safe to return -ENOMEM at any time in creating this + * list because transport_free_pages() will eventually be called, and is + * smart enough to deallocate all list items for sg and sg_bidi lists. 
+ */ se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); if (!(se_mem)) { printk(KERN_ERR "Unable to allocate struct se_mem\n"); @@ -4489,26 +4389,21 @@ static int transport_map_sg_to_mem( if (cmd_size > sg->length) { se_mem->se_len = sg->length; sg = sg_next(sg); - sg_count++; } else se_mem->se_len = cmd_size; cmd_size -= se_mem->se_len; + (*sg_count)++; - DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n", - *se_mem_cnt, cmd_size); + DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n", + sg_count, cmd_size); DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", se_mem->se_page, se_mem->se_off, se_mem->se_len); list_add_tail(&se_mem->se_list, se_mem_list); - (*se_mem_cnt)++; } - DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)" - " struct se_mem\n", sg_count, *se_mem_cnt); - - if (sg_count != *se_mem_cnt) - BUG(); + DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count); return 0; } @@ -4551,7 +4446,7 @@ int transport_map_mem_to_sg( sg->length = se_mem->se_len; if (!(list_is_last(&se_mem->se_list, - se_cmd->t_task->t_mem_list))) { + &se_cmd->t_task.t_mem_list))) { se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); (*se_mem_cnt)++; @@ -4587,7 +4482,7 @@ int transport_map_mem_to_sg( sg->length = (se_mem->se_len - *task_offset); if (!(list_is_last(&se_mem->se_list, - se_cmd->t_task->t_mem_list))) { + &se_cmd->t_task.t_mem_list))) { se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); (*se_mem_cnt)++; @@ -4645,7 +4540,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) * Walk the struct se_task list and setup scatterlist chains * for each contiguosly allocated struct se_task->task_sg[]. */ - list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { if (!(task->task_sg) || !(task->task_padded_sg)) continue; @@ -4656,7 +4551,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) * Either add chain or mark end of scatterlist */ if (!(list_is_last(&task->t_list, - &cmd->t_task->t_task_list))) { + &cmd->t_task.t_task_list))) { /* * Clear existing SGL termination bit set in * transport_init_task_sg(), see sg_mark_end() @@ -4682,7 +4577,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) /* * Check for single task.. */ - if (!(list_is_last(&task->t_list, &cmd->t_task->t_task_list))) { + if (!(list_is_last(&task->t_list, &cmd->t_task.t_task_list))) { /* * Clear existing SGL termination bit set in * transport_init_task_sg(), see sg_mark_end() @@ -4700,18 +4595,18 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) * Setup the starting pointer and total t_tasks_sg_linked_no including * padding SGs for linking and to mark the end. 
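 * For readers new to scatterlist chaining, the primitives used in this
 * function behave roughly as follows (standalone sketch, not taken
 * from the driver):
 *
 *	struct scatterlist a[4], b[4];
 *
 *	sg_init_table(a, 4);
 *	sg_init_table(b, 4);
 *	sg_chain(a, 4, b);	// a[3] becomes a link entry -> b
 *	sg_mark_end(&b[3]);	// terminate the combined list
 *
 * after which for_each_sg() walks a[0..2] and then b[0..3] as one
 * logical list, skipping the link entry transparently.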
*/ - cmd->t_task->t_tasks_sg_chained = sg_first; - cmd->t_task->t_tasks_sg_chained_no = sg_count; + cmd->t_task.t_tasks_sg_chained = sg_first; + cmd->t_task.t_tasks_sg_chained_no = sg_count; - DEBUG_CMD_M("Setup cmd: %p cmd->t_task->t_tasks_sg_chained: %p and" - " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task->t_tasks_sg_chained, - cmd->t_task->t_tasks_sg_chained_no); + DEBUG_CMD_M("Setup cmd: %p cmd->t_task.t_tasks_sg_chained: %p and" + " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task.t_tasks_sg_chained, + cmd->t_task.t_tasks_sg_chained_no); - for_each_sg(cmd->t_task->t_tasks_sg_chained, sg, - cmd->t_task->t_tasks_sg_chained_no, i) { + for_each_sg(cmd->t_task.t_tasks_sg_chained, sg, + cmd->t_task.t_tasks_sg_chained_no, i) { - DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n", - i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic); + DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n", + i, sg, sg_page(sg), sg->length, sg->offset); if (sg_is_chain(sg)) DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); if (sg_is_last(sg)) @@ -4741,7 +4636,7 @@ static int transport_do_se_mem_map( in_mem, in_se_mem, out_se_mem, se_mem_cnt, task_offset_in); if (ret == 0) - task->task_se_cmd->t_task->t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; return ret; } @@ -4791,7 +4686,7 @@ static u32 transport_generic_get_cdb_count( struct se_task *task; struct se_mem *se_mem = NULL, *se_mem_lout = NULL; struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; int max_sectors_set = 0, ret; u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; @@ -4805,15 +4700,14 @@ static u32 transport_generic_get_cdb_count( * mem_list will ever be empty at this point. */ if (!(list_empty(mem_list))) - se_mem = list_entry(mem_list->next, struct se_mem, se_list); + se_mem = list_first_entry(mem_list, struct se_mem, se_list); /* * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation */ - if ((cmd->t_task->t_mem_bidi_list != NULL) && - !(list_empty(cmd->t_task->t_mem_bidi_list)) && + if (!list_empty(&cmd->t_task.t_mem_bidi_list) && (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) - se_mem_bidi = list_entry(cmd->t_task->t_mem_bidi_list->next, + se_mem_bidi = list_first_entry(&cmd->t_task.t_mem_bidi_list, struct se_mem, se_list); while (sectors) { @@ -4836,15 +4730,15 @@ static u32 transport_generic_get_cdb_count( cdb = dev->transport->get_cdb(task); if ((cdb)) { - memcpy(cdb, cmd->t_task->t_task_cdb, - scsi_command_size(cmd->t_task->t_task_cdb)); + memcpy(cdb, cmd->t_task.t_task_cdb, + scsi_command_size(cmd->t_task.t_task_cdb)); cmd->transport_split_cdb(task->task_lba, &task->task_sectors, cdb); } /* * Perform the SE OBJ plugin and/or Transport plugin specific - * mapping for cmd->t_task->t_mem_list. And setup the + * mapping for cmd->t_task.t_mem_list. 
And setup the * task->task_sg and if necessary task->task_sg_bidi */ ret = transport_do_se_mem_map(dev, task, mem_list, @@ -4855,7 +4749,7 @@ static u32 transport_generic_get_cdb_count( se_mem = se_mem_lout; /* - * Setup the cmd->t_task->t_mem_bidi_list -> task->task_sg_bidi + * Setup the cmd->t_task.t_mem_bidi_list -> task->task_sg_bidi * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI * * Note that the first call to transport_do_se_mem_map() above will @@ -4865,7 +4759,7 @@ static u32 transport_generic_get_cdb_count( */ if (task->task_sg_bidi != NULL) { ret = transport_do_se_mem_map(dev, task, - cmd->t_task->t_mem_bidi_list, NULL, + &cmd->t_task.t_mem_bidi_list, NULL, se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, &task_offset_in); if (ret < 0) @@ -4888,8 +4782,8 @@ static u32 transport_generic_get_cdb_count( } if (set_counts) { - atomic_inc(&cmd->t_task->t_fe_count); - atomic_inc(&cmd->t_task->t_se_count); + atomic_inc(&cmd->t_task.t_fe_count); + atomic_inc(&cmd->t_task.t_se_count); } DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", @@ -4904,7 +4798,7 @@ out: static int transport_map_control_cmd_to_task(struct se_cmd *cmd) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; unsigned char *cdb; struct se_task *task; int ret; @@ -4915,26 +4809,26 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) cdb = dev->transport->get_cdb(task); if (cdb) - memcpy(cdb, cmd->t_task->t_task_cdb, - scsi_command_size(cmd->t_task->t_task_cdb)); + memcpy(cdb, cmd->t_task.t_task_cdb, + scsi_command_size(cmd->t_task.t_task_cdb)); task->task_size = cmd->data_length; task->task_sg_num = (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0; - atomic_inc(&cmd->t_task->t_fe_count); - atomic_inc(&cmd->t_task->t_se_count); + atomic_inc(&cmd->t_task.t_fe_count); + atomic_inc(&cmd->t_task.t_se_count); if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { struct se_mem *se_mem = NULL, *se_mem_lout = NULL; u32 se_mem_cnt = 0, task_offset = 0; - if (!list_empty(cmd->t_task->t_mem_list)) - se_mem = list_entry(cmd->t_task->t_mem_list->next, + if (!list_empty(&cmd->t_task.t_mem_list)) + se_mem = list_first_entry(&cmd->t_task.t_mem_list, struct se_mem, se_list); ret = transport_do_se_mem_map(dev, task, - cmd->t_task->t_mem_list, NULL, se_mem, + &cmd->t_task.t_mem_list, NULL, se_mem, &se_mem_lout, &se_mem_cnt, &task_offset); if (ret < 0) return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; @@ -4969,14 +4863,14 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) { struct se_portal_group *se_tpg; struct se_task *task; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; int ret = 0; /* * Determine is the TCM fabric module has already allocated physical * memory, and is directly calling transport_generic_map_mem_to_cmd() * to setup beforehand the linked list of physical memory at - * cmd->t_task->t_mem_list of struct se_mem->se_page + * cmd->t_task.t_mem_list of struct se_mem->se_page */ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { ret = transport_allocate_resources(cmd); @@ -5005,7 +4899,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) } if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - list_for_each_entry(task, &cmd->t_task->t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { if (atomic_read(&task->task_sent)) continue; if (!dev->transport->map_task_SG) @@ -5052,9 +4946,9 @@ void transport_generic_process_write(struct se_cmd *cmd) * original EDTL */ if 
(cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { - if (!cmd->t_task->t_tasks_se_num) { + if (!cmd->t_task.t_tasks_se_num) { unsigned char *dst, *buf = - (unsigned char *)cmd->t_task->t_task_buf; + (unsigned char *)cmd->t_task.t_task_buf; dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL); if (!(dst)) { @@ -5066,15 +4960,15 @@ void transport_generic_process_write(struct se_cmd *cmd) } memcpy(dst, buf, cmd->cmd_spdtl); - kfree(cmd->t_task->t_task_buf); - cmd->t_task->t_task_buf = dst; + kfree(cmd->t_task.t_task_buf); + cmd->t_task.t_task_buf = dst; } else { struct scatterlist *sg = - (struct scatterlist *sg)cmd->t_task->t_task_buf; + (struct scatterlist *sg)cmd->t_task.t_task_buf; struct scatterlist *orig_sg; orig_sg = kzalloc(sizeof(struct scatterlist) * - cmd->t_task->t_tasks_se_num, + cmd->t_task.t_tasks_se_num, GFP_KERNEL))) { if (!(orig_sg)) { printk(KERN_ERR "Unable to allocate memory" @@ -5084,9 +4978,9 @@ void transport_generic_process_write(struct se_cmd *cmd) return; } - memcpy(orig_sg, cmd->t_task->t_task_buf, + memcpy(orig_sg, cmd->t_task.t_task_buf, sizeof(struct scatterlist) * - cmd->t_task->t_tasks_se_num); + cmd->t_task.t_tasks_se_num); cmd->data_length = cmd->cmd_spdtl; /* @@ -5117,22 +5011,22 @@ static int transport_generic_write_pending(struct se_cmd *cmd) unsigned long flags; int ret; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); cmd->t_state = TRANSPORT_WRITE_PENDING; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); /* * For the TCM control CDBs using a contiguous buffer, do the memcpy * from the passed Linux/SCSI struct scatterlist located at - * se_cmd->t_task->t_task_pt_buf to the contiguous buffer at - * se_cmd->t_task->t_task_buf. + * se_cmd->t_task.t_task_pt_buf to the contiguous buffer at + * se_cmd->t_task.t_task_buf. */ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) transport_memcpy_read_contig(cmd, - cmd->t_task->t_task_buf, - cmd->t_task->t_task_pt_sgl); + cmd->t_task.t_task_buf, + cmd->t_task.t_task_pt_sgl); /* * Clear the se_cmd for WRITE_PENDING status in order to set - * cmd->t_task->t_transport_active=0 so that transport_generic_handle_data + * cmd->t_task.t_transport_active=0 so that transport_generic_handle_data * can be called from HW target mode interrupt code. This is safe * to be called with transport_off=1 before the cmd->se_tfo->write_pending * because the se_cmd->se_lun pointer is not being cleared. @@ -5156,7 +5050,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd) */ void transport_release_cmd_to_pool(struct se_cmd *cmd) { - BUG_ON(!cmd->t_task); BUG_ON(!cmd->se_tfo); transport_free_se_cmd(cmd); @@ -5174,7 +5067,7 @@ void transport_generic_free_cmd( int release_to_pool, int session_reinstatement) { - if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !cmd->t_task) + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) transport_release_cmd_to_pool(cmd); else { core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); @@ -5220,32 +5113,32 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) * If the frontend has already requested this struct se_cmd to * be stopped, we can safely ignore this struct se_cmd. 
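 * The arbitration between the two stop sources (LUN shutdown here,
 * frontend abort via t_transport_stop) happens under t_state_lock; a
 * condensed sketch of the rule being applied ("winner" is an
 * illustrative stand-in, not a field in the code):
 *
 *	spin_lock_irqsave(&cmd->t_task.t_state_lock, flags);
 *	if (atomic_read(&cmd->t_task.t_transport_stop))
 *		winner = FRONTEND;	// abort path completes the cmd
 *	else
 *		atomic_set(&cmd->t_task.transport_lun_fe_stop, 1);
 *	spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags);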
*/ - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - if (atomic_read(&cmd->t_task->t_transport_stop)) { - atomic_set(&cmd->t_task->transport_lun_stop, 0); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + if (atomic_read(&cmd->t_task.t_transport_stop)) { + atomic_set(&cmd->t_task.transport_lun_stop, 0); DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); transport_cmd_check_stop(cmd, 1, 0); return -EPERM; } - atomic_set(&cmd->t_task->transport_lun_fe_stop, 1); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + atomic_set(&cmd->t_task.transport_lun_fe_stop, 1); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); - wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq); + wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); ret = transport_stop_tasks_for_cmd(cmd); DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" - " %d\n", cmd, cmd->t_task->t_task_cdbs, ret); + " %d\n", cmd, cmd->t_task.t_task_cdbs, ret); if (!ret) { DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", cmd->se_tfo->get_task_tag(cmd)); - wait_for_completion(&cmd->t_task->transport_lun_stop_comp); + wait_for_completion(&cmd->t_task.transport_lun_stop_comp); DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", cmd->se_tfo->get_task_tag(cmd)); } - transport_remove_cmd_from_queue(cmd, &cmd->se_lun->lun_se_dev->dev_queue_obj); + transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); return 0; } @@ -5266,31 +5159,24 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) * Initiator Port. */ spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); - while (!list_empty_careful(&lun->lun_cmd_list)) { - cmd = list_entry(lun->lun_cmd_list.next, - struct se_cmd, se_lun_list); - list_del(&cmd->se_lun_list); - - if (!(cmd->t_task)) { - printk(KERN_ERR "ITT: 0x%08x, cmd->t_task = NULL" - "[i,t]_state: %u/%u\n", - cmd->se_tfo->get_task_tag(cmd), - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); - BUG(); - } - atomic_set(&cmd->t_task->transport_lun_active, 0); + while (!list_empty(&lun->lun_cmd_list)) { + cmd = list_first_entry(&lun->lun_cmd_list, + struct se_cmd, se_lun_node); + list_del(&cmd->se_lun_node); + + atomic_set(&cmd->t_task.transport_lun_active, 0); /* * This will notify iscsi_target_transport.c: * transport_cmd_check_stop() that a LUN shutdown is in * progress for the iscsi_cmd_t. 
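 * The surrounding loop is the standard "pop one entry under the lock,
 * drop the lock to act on it, retake it to pop the next" idiom; its
 * skeleton, with the per-command shutdown work elided (editor's
 * sketch):
 *
 *	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 *	while (!list_empty(&lun->lun_cmd_list)) {
 *		cmd = list_first_entry(&lun->lun_cmd_list,
 *				       struct se_cmd, se_lun_node);
 *		list_del(&cmd->se_lun_node);
 *		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
 *		// ... shut the command down without holding the lock ...
 *		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 *	}
 *	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);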
*/ - spin_lock(&cmd->t_task->t_state_lock); - DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task->transport" + spin_lock(&cmd->t_task.t_state_lock); + DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task.transport" "_lun_stop for ITT: 0x%08x\n", cmd->se_lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - atomic_set(&cmd->t_task->transport_lun_stop, 1); - spin_unlock(&cmd->t_task->t_state_lock); + atomic_set(&cmd->t_task.transport_lun_stop, 1); + spin_unlock(&cmd->t_task.t_state_lock); spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); @@ -5318,14 +5204,14 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) cmd->se_lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags); - if (!(atomic_read(&cmd->t_task->transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags); + if (!(atomic_read(&cmd->t_task.transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); goto check_cond; } - atomic_set(&cmd->t_task->transport_dev_active, 0); + atomic_set(&cmd->t_task.transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); transport_free_dev_tasks(cmd); /* @@ -5342,24 +5228,24 @@ check_cond: * be released, notify the waiting thread now that LU has * finished accessing it. */ - spin_lock_irqsave(&cmd->t_task->t_state_lock, cmd_flags); - if (atomic_read(&cmd->t_task->transport_lun_fe_stop)) { + spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags); + if (atomic_read(&cmd->t_task.transport_lun_fe_stop)) { DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" " struct se_cmd: %p ITT: 0x%08x\n", lun->unpacked_lun, cmd, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); transport_cmd_check_stop(cmd, 1, 0); - complete(&cmd->t_task->transport_lun_fe_stop_comp); + complete(&cmd->t_task.transport_lun_fe_stop_comp); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); } spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); @@ -5379,7 +5265,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) { struct task_struct *kt; - kt = kthread_run(transport_clear_lun_thread, (void *)lun, + kt = kthread_run(transport_clear_lun_thread, lun, "tcm_cl_%u", lun->unpacked_lun); if (IS_ERR(kt)) { printk(KERN_ERR "Unable to start clear_lun thread\n"); @@ -5405,15 +5291,15 @@ static void transport_generic_wait_for_tasks( if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) return; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); /* * If we are already stopped due to an external event (ie: LUN shutdown) * sleep until the connection can have the passed struct se_cmd back. - * The cmd->t_task->transport_lun_stopped_sem will be upped by + * The cmd->t_task.transport_lun_stopped_sem will be upped by * transport_clear_lun_from_sessions() once the ConfigFS context caller * has completed its operation on the struct se_cmd. 
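 * The signalling underneath is plain <linux/completion.h>; a minimal
 * standalone sketch of the same handshake (illustrative only):
 *
 *	struct completion stop_done;
 *
 *	init_completion(&stop_done);
 *	// waiting context:
 *	wait_for_completion(&stop_done);	// sleeps until signalled
 *	// shutdown context, later:
 *	complete(&stop_done);			// waiter resumes
 *
 * In this file the completion (transport_lun_fe_stop_comp) is embedded
 * in cmd->t_task, so no allocation or lookup is needed to find it.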
*/ - if (atomic_read(&cmd->t_task->transport_lun_stop)) { + if (atomic_read(&cmd->t_task.transport_lun_stop)) { DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" " wait_for_completion(&cmd->t_tasktransport_lun_fe" @@ -5426,10 +5312,10 @@ static void transport_generic_wait_for_tasks( * We go ahead and up transport_lun_stop_comp just to be sure * here. */ - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); - complete(&cmd->t_task->transport_lun_stop_comp); - wait_for_completion(&cmd->t_task->transport_lun_fe_stop_comp); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + complete(&cmd->t_task.transport_lun_stop_comp); + wait_for_completion(&cmd->t_task.transport_lun_fe_stop_comp); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); transport_all_task_dev_remove_state(cmd); /* @@ -5442,13 +5328,13 @@ static void transport_generic_wait_for_tasks( "stop_comp); for ITT: 0x%08x\n", cmd->se_tfo->get_task_tag(cmd)); - atomic_set(&cmd->t_task->transport_lun_stop, 0); + atomic_set(&cmd->t_task.transport_lun_stop, 0); } - if (!atomic_read(&cmd->t_task->t_transport_active) || - atomic_read(&cmd->t_task->t_transport_aborted)) + if (!atomic_read(&cmd->t_task.t_transport_active) || + atomic_read(&cmd->t_task.t_transport_aborted)) goto remove; - atomic_set(&cmd->t_task->t_transport_stop, 1); + atomic_set(&cmd->t_task.t_transport_stop, 1); DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" @@ -5456,21 +5342,21 @@ static void transport_generic_wait_for_tasks( cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state); - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); - wake_up_interruptible(&cmd->se_lun->lun_se_dev->dev_queue_obj.thread_wq); + wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); - wait_for_completion(&cmd->t_task->t_transport_stop_comp); + wait_for_completion(&cmd->t_task.t_transport_stop_comp); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - atomic_set(&cmd->t_task->t_transport_active, 0); - atomic_set(&cmd->t_task->t_transport_stop, 0); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + atomic_set(&cmd->t_task.t_transport_active, 0); + atomic_set(&cmd->t_task.t_transport_stop, 0); DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" - "&cmd->t_task->t_transport_stop_comp) for ITT: 0x%08x\n", + "&cmd->t_task.t_transport_stop_comp) for ITT: 0x%08x\n", cmd->se_tfo->get_task_tag(cmd)); remove: - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); if (!remove_cmd) return; @@ -5509,13 +5395,13 @@ int transport_send_check_condition_and_sense( int offset; u8 asc = 0, ascq = 0; - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); return 0; } cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; - spin_unlock_irqrestore(&cmd->t_task->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); if (!reason && from_transport) goto after_reason; @@ -5674,14 +5560,14 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) { int ret = 0; - if (atomic_read(&cmd->t_task->t_transport_aborted) != 0) { + if 
(atomic_read(&cmd->t_task.t_transport_aborted) != 0) { if (!(send_status) || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) return 1; #if 0 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" " status for CDB: 0x%02x ITT: 0x%08x\n", - cmd->t_task->t_task_cdb[0], + cmd->t_task.t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); #endif cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; @@ -5702,7 +5588,7 @@ void transport_send_task_abort(struct se_cmd *cmd) */ if (cmd->data_direction == DMA_TO_DEVICE) { if (cmd->se_tfo->write_pending_status(cmd) != 0) { - atomic_inc(&cmd->t_task->t_transport_aborted); + atomic_inc(&cmd->t_task.t_transport_aborted); smp_mb__after_atomic_inc(); cmd->scsi_status = SAM_STAT_TASK_ABORTED; transport_new_cmd_failure(cmd); @@ -5712,7 +5598,7 @@ void transport_send_task_abort(struct se_cmd *cmd) cmd->scsi_status = SAM_STAT_TASK_ABORTED; #if 0 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," - " ITT: 0x%08x\n", cmd->t_task->t_task_cdb[0], + " ITT: 0x%08x\n", cmd->t_task.t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); #endif cmd->se_tfo->queue_status(cmd); @@ -5725,7 +5611,7 @@ void transport_send_task_abort(struct se_cmd *cmd) int transport_generic_do_tmr(struct se_cmd *cmd) { struct se_cmd *ref_cmd; - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_tmr_req *tmr = cmd->se_tmr_req; int ret; @@ -5788,9 +5674,7 @@ transport_get_task_from_state_list(struct se_device *dev) static void transport_processing_shutdown(struct se_device *dev) { struct se_cmd *cmd; - struct se_queue_req *qr; struct se_task *task; - u8 state; unsigned long flags; /* * Empty the struct se_device's struct se_task state list. @@ -5803,15 +5687,9 @@ static void transport_processing_shutdown(struct se_device *dev) } cmd = task->task_se_cmd; - if (!cmd->t_task) { - printk(KERN_ERR "cmd->t_task is NULL for task: %p cmd:" - " %p ITT: 0x%08x\n", task, cmd, - cmd->se_tfo->get_task_tag(cmd)); - continue; - } spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," " i_state/def_i_state: %d/%d, t_state/def_t_state:" @@ -5819,22 +5697,22 @@ static void transport_processing_shutdown(struct se_device *dev) cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, cmd->t_state, cmd->deferred_t_state, - cmd->t_task->t_task_cdb[0]); + cmd->t_task.t_task_cdb[0]); DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" " %d t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", cmd->se_tfo->get_task_tag(cmd), - cmd->t_task->t_task_cdbs, - atomic_read(&cmd->t_task->t_task_cdbs_left), - atomic_read(&cmd->t_task->t_task_cdbs_sent), - atomic_read(&cmd->t_task->t_transport_active), - atomic_read(&cmd->t_task->t_transport_stop), - atomic_read(&cmd->t_task->t_transport_sent)); + cmd->t_task.t_task_cdbs, + atomic_read(&cmd->t_task.t_task_cdbs_left), + atomic_read(&cmd->t_task.t_task_cdbs_sent), + atomic_read(&cmd->t_task.t_transport_active), + atomic_read(&cmd->t_task.t_transport_stop), + atomic_read(&cmd->t_task.t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); DEBUG_DO("Waiting for task: %p to shutdown for dev:" " %p\n", task, dev); @@ -5842,8 +5720,8 @@ static void 
transport_processing_shutdown(struct se_device *dev) DEBUG_DO("Completed task: %p shutdown for dev: %p\n", task, dev); - spin_lock_irqsave(&cmd->t_task->t_state_lock, flags); - atomic_dec(&cmd->t_task->t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + atomic_dec(&cmd->t_task.t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -5853,39 +5731,39 @@ static void transport_processing_shutdown(struct se_device *dev) } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&cmd->t_task->t_task_cdbs_ex_left))) { + if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); DEBUG_DO("Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&cmd->t_task->t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task.t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - if (atomic_read(&cmd->t_task->t_transport_active)) { + if (atomic_read(&cmd->t_task.t_transport_active)) { DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" " %p\n", task, dev); - if (atomic_read(&cmd->t_task->t_fe_count)) { + if (atomic_read(&cmd->t_task.t_fe_count)) { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); transport_send_check_condition_and_sense( cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, - &cmd->se_lun->lun_se_dev->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); transport_remove_cmd_from_queue(cmd, - &cmd->se_lun->lun_se_dev->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); @@ -5899,22 +5777,22 @@ static void transport_processing_shutdown(struct se_device *dev) DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", task, dev); - if (atomic_read(&cmd->t_task->t_fe_count)) { + if (atomic_read(&cmd->t_task.t_fe_count)) { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, - &cmd->se_lun->lun_se_dev->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &cmd->t_task->t_state_lock, flags); + &cmd->t_task.t_state_lock, flags); transport_remove_cmd_from_queue(cmd, - &cmd->se_lun->lun_se_dev->dev_queue_obj); + &cmd->se_dev->dev_queue_obj); transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) @@ -5927,15 +5805,12 @@ static void transport_processing_shutdown(struct se_device *dev) /* * Empty the struct se_device's struct se_cmd list. 
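 * Round 1 (above) replaced the heap-allocated struct se_queue_req with
 * an embedded se_queue_node, so the dequeue below loses an allocation
 * and a state copy per command; shape of the change (editor's sketch):
 *
 *	before:	qr = transport_get_qr_from_queue(&dev->dev_queue_obj);
 *		cmd = qr->cmd; state = qr->state; kfree(qr);
 *	after:	cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
 *		// state is read straight from cmd->t_state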
*/ - while ((qr = transport_get_qr_from_queue(&dev->dev_queue_obj))) { - cmd = qr->cmd; - state = qr->state; - kfree(qr); + while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", - cmd, state); + cmd, cmd->t_state); - if (atomic_read(&cmd->t_task->t_fe_count)) { + if (atomic_read(&cmd->t_task.t_fe_count)) { transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); @@ -5955,10 +5830,9 @@ static void transport_processing_shutdown(struct se_device *dev) */ static int transport_processing_thread(void *param) { - int ret, t_state; + int ret; struct se_cmd *cmd; struct se_device *dev = (struct se_device *) param; - struct se_queue_req *qr; set_user_nice(current, -20); @@ -5980,15 +5854,11 @@ static int transport_processing_thread(void *param) get_cmd: __transport_execute_tasks(dev); - qr = transport_get_qr_from_queue(&dev->dev_queue_obj); - if (!(qr)) + cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj); + if (!cmd) continue; - cmd = qr->cmd; - t_state = qr->state; - kfree(qr); - - switch (t_state) { + switch (cmd->t_state) { case TRANSPORT_NEW_CMD_MAP: if (!(cmd->se_tfo->new_cmd_map)) { printk(KERN_ERR "cmd->se_tfo->new_cmd_map is" @@ -6039,7 +5909,7 @@ get_cmd: default: printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" " %d for ITT: 0x%08x i_state: %d on SE LUN:" - " %u\n", t_state, cmd->deferred_t_state, + " %u\n", cmd->t_state, cmd->deferred_t_state, cmd->se_tfo->get_task_tag(cmd), cmd->se_tfo->get_cmd_state(cmd), cmd->se_lun->unpacked_lun); diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 16f41d188e26..3b8b02cf4b41 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition( u8 *asc, u8 *ascq) { - struct se_device *dev = cmd->se_lun->lun_se_dev; + struct se_device *dev = cmd->se_dev; struct se_dev_entry *deve; struct se_session *sess = cmd->se_sess; struct se_node_acl *nacl; @@ -270,7 +270,7 @@ void core_scsi3_ua_for_check_condition( nacl->se_tpg->se_tpg_tfo->get_fabric_name(), (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? 
"Reporting" : "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, - cmd->orig_fe_lun, cmd->t_task->t_task_cdb[0], *asc, *ascq); + cmd->orig_fe_lun, cmd->t_task.t_task_cdb[0], *asc, *ascq); } int core_scsi3_ua_clear_for_request_sense( diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 19b2b9948314..6d9553bbba30 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -72,16 +72,16 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) caller, cmd, cmd->cdb); printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); - task = se_cmd->t_task; + task = &se_cmd->t_task; printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", caller, cmd, task, task->t_tasks_se_num, task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); - if (task->t_mem_list) - list_for_each_entry(mem, task->t_mem_list, se_list) - printk(KERN_INFO "%s: cmd %p mem %p page %p " - "len 0x%x off 0x%x\n", - caller, cmd, mem, - mem->se_page, mem->se_len, mem->se_off); + + list_for_each_entry(mem, &task->t_mem_list, se_list) + printk(KERN_INFO "%s: cmd %p mem %p page %p " + "len 0x%x off 0x%x\n", + caller, cmd, mem, + mem->se_page, mem->se_len, mem->se_off); sp = cmd->seq; if (sp) { ep = fc_seq_exch(sp); @@ -262,9 +262,9 @@ int ft_write_pending(struct se_cmd *se_cmd) * TCM/LIO target */ transport_do_task_sg_chain(se_cmd); - cmd->sg = se_cmd->t_task->t_tasks_sg_chained; + cmd->sg = se_cmd->t_task.t_tasks_sg_chained; cmd->sg_cnt = - se_cmd->t_task->t_tasks_sg_chained_no; + se_cmd->t_task.t_tasks_sg_chained_no; } if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, cmd->sg, cmd->sg_cnt)) @@ -438,7 +438,7 @@ static void ft_send_tm(struct ft_cmd *cmd) switch (fcp->fc_tm_flags) { case FCP_TMF_LUN_RESET: cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); - if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) { + if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) { /* * Make sure to clean up newly allocated TMR request * since "unable to handle TMR request because failed @@ -637,7 +637,7 @@ static void ft_send_cmd(struct ft_cmd *cmd) fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd); cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun); - ret = transport_get_lun_for_cmd(&cmd->se_cmd, cmd->lun); + ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun); if (ret < 0) { ft_dump_cmd(cmd, __func__); transport_send_check_condition_and_sense(&cmd->se_cmd, @@ -650,13 +650,13 @@ static void ft_send_cmd(struct ft_cmd *cmd) FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); ft_dump_cmd(cmd, __func__); - if (ret == -1) { + if (ret == -ENOMEM) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_generic_free_cmd(se_cmd, 0, 1, 0); return; } - if (ret == -2) { + if (ret == -EINVAL) { if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) ft_queue_status(se_cmd); else diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8c5067c65720..58e4745749db 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -331,7 +331,7 @@ static struct se_portal_group *ft_add_tpg( transport_init_queue_obj(&tpg->qobj); ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg, - (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL); + tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) { kfree(tpg); return NULL; diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 
47efcfb9f4b8..f18af6e99b83 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -90,15 +90,14 @@ int ft_queue_data_in(struct se_cmd *se_cmd) lport = ep->lp; cmd->seq = lport->tt.seq_start_next(cmd->seq); - task = se_cmd->t_task; - BUG_ON(!task); + task = &se_cmd->t_task; remaining = se_cmd->data_length; /* * Setup to use first mem list entry if any. */ if (task->t_tasks_se_num) { - mem = list_first_entry(task->t_mem_list, + mem = list_first_entry(&task->t_mem_list, struct se_mem, se_list); mem_len = mem->se_len; mem_off = mem->se_off; @@ -236,8 +235,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) u32 f_ctl; void *buf; - task = se_cmd->t_task; - BUG_ON(!task); + task = &se_cmd->t_task; fh = fc_frame_header_get(fp); if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) @@ -315,7 +313,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) * Setup to use first mem list entry if any. */ if (task->t_tasks_se_num) { - mem = list_first_entry(task->t_mem_list, + mem = list_first_entry(&task->t_mem_list, struct se_mem, se_list); mem_len = mem->se_len; mem_off = mem->se_off; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index b0b83edbe453..94c838dcfc3c 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -123,7 +123,7 @@ enum se_cmd_flags_table { SCF_SENT_DELAYED_TAS = 0x00020000, SCF_ALUA_NON_OPTIMIZED = 0x00040000, SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, - SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000, + SCF_UNUSED = 0x00100000, SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, SCF_EMULATE_SYNC_CACHE = 0x00800000, @@ -452,9 +452,9 @@ struct se_transport_task { * and other HW target mode fabric modules. 
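 * Editor's note on this hunk: converting "struct list_head *" members
 * to embedded "struct list_head" ties each list's lifetime to the
 * se_cmd itself, with initialization done once in
 * transport_init_se_cmd(). The resulting usage pattern (names from
 * this header, sketch only):
 *
 *	INIT_LIST_HEAD(&cmd->t_task.t_mem_list);	// once, at init
 *	list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list);
 *	if (!list_empty(&cmd->t_task.t_mem_bidi_list))
 *		// BIDI payload present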
 */
 	struct scatterlist *t_task_pt_sgl;
-	struct list_head *t_mem_list;
+	struct list_head t_mem_list;
 	/* Used for BIDI READ */
-	struct list_head *t_mem_bidi_list;
+	struct list_head t_mem_bidi_list;
 	struct list_head t_task_list;
 } ____cacheline_aligned;
@@ -523,9 +523,9 @@ struct se_cmd {
 	atomic_t transport_sent;
 	/* Used for sense data */
 	void *sense_buffer;
-	struct list_head se_delayed_list;
-	struct list_head se_ordered_list;
-	struct list_head se_lun_list;
+	struct list_head se_delayed_node;
+	struct list_head se_ordered_node;
+	struct list_head se_lun_node;
 	struct se_device *se_dev;
 	struct se_dev_entry *se_deve;
 	struct se_device *se_obj_ptr;
@@ -534,9 +534,8 @@ struct se_cmd {
 	/* Only used for internal passthrough and legacy TCM fabric modules */
 	struct se_session *se_sess;
 	struct se_tmr_req *se_tmr_req;
-	/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
-	struct se_transport_task *t_task;
-	struct se_transport_task t_task_backstore;
+	struct se_transport_task t_task;
+	struct list_head se_queue_node;
 	struct target_core_fabric_ops *se_tfo;
 	int (*transport_emulate_cdb)(struct se_cmd *);
 	void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
index d9745bfa4429..96586cc94984 100644
--- a/include/target/target_core_device.h
+++ b/include/target/target_core_device.h
@@ -1,8 +1,8 @@
 #ifndef TARGET_CORE_DEVICE_H
 #define TARGET_CORE_DEVICE_H
 
-extern int transport_get_lun_for_cmd(struct se_cmd *, u32);
-extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
+extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
+extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
 extern struct se_dev_entry *core_get_se_deve_from_rtpi(
 	struct se_node_acl *, u16);
 extern int core_free_device_list_for_node(struct se_node_acl *,
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 1dd4d1841497..acd591491767 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -159,7 +159,6 @@ extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
 				struct se_subsystem_dev *, u32,
 				void *, struct se_dev_limits *,
 				const char *, const char *);
-extern void transport_device_setup_cmd(struct se_cmd *);
 extern void transport_init_se_cmd(struct se_cmd *,
 				struct target_core_fabric_ops *,
 				struct se_session *, u32, int, int,
-- 
cgit v1.2.3


From a1d8b49abd60ba5d09e7c968731abcb0f8f1cbf6 Mon Sep 17 00:00:00 2001
From: Andy Grover
Date: Mon, 2 May 2011 17:12:10 -0700
Subject: target: Updates from AGrover and HCH (round 3)

This patch contains a squashed version of third round series cleanups,
improvements, and simplifications from Andy and Christoph ahead of the
heavy lifting between round 3 -> 4 for the target core SGL conversion.
This includes cleanups to the main target I/O path and other
miscellaneous updates.

target: Replace custom sg<->buf functions with lib funcs
target: Simplify sector limiting code
target: get_cdb should never return NULL
target: Simplify transport_memcpy_se_mem_read_contig
target: Use assignment rather than increment for t_task_cdbs
target: Don't pass dma_size to generic_get_mem
target: Pass sg with type scatterlist in transport_map_sg_to_mem
target: Move task_sg_num next to task_sg in struct se_task
target: inline struct se_transport_task into struct se_cmd
target: Change name & semantics of transport_get_sectors()
target: Remove unused members of se_cmd
target: Rename se_cmd.t_task_cdbs to t_task_list_num
target: Fix some spelling
target: Remove unused var from transport_generic_do_tmr
target: map_sg_to_mem: return sg_count in return value
target/pscsi: Use min_t for sector limits
target/pscsi: Unused param for pscsi_get_bio()
target: Rename get_cdb_count to allocate_tasks
target: Make transport_generic_new_cmd() available for iscsi-target
target: Remove fabric callback to allocate iovecs
target: Fix transport_generic_new_cmd WRITE comment
(hch: Use __GFP_ZERO usage for alloc_pages() usage)

Signed-off-by: Andy Grover
Reviewed-by: Christoph Hellwig
Signed-off-by: Nicholas Bellinger
---
 drivers/target/loopback/tcm_loop.c      |   11 +-
 drivers/target/target_core_alua.c       |    4 +-
 drivers/target/target_core_cdb.c        |   39 +-
 drivers/target/target_core_device.c     |    6 +-
 drivers/target/target_core_file.c       |    8 +-
 drivers/target/target_core_iblock.c     |    4 +-
 drivers/target/target_core_pr.c         |   22 +-
 drivers/target/target_core_pscsi.c      |   20 +-
 drivers/target/target_core_rd.c         |    8 +-
 drivers/target/target_core_tmr.c        |   56 +-
 drivers/target/target_core_transport.c  | 1008 +++++++++++++------------
 drivers/target/target_core_ua.c         |    2 +-
 drivers/target/tcm_fc/tfc_cmd.c         |   14 +-
 drivers/target/tcm_fc/tfc_io.c          |   21 +-
 include/target/target_core_base.h       |  105 ++--
 include/target/target_core_fabric_ops.h |    5 -
 include/target/target_core_transport.h  |    3 +-
 17 files changed, 583 insertions(+), 753 deletions(-)
(limited to 'include')

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index eeb7ee7ab9f7..7ba2542aabe7 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -118,7 +118,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
 	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
 	 */
 	if (scsi_bidi_cmnd(sc))
-		se_cmd->t_task.t_tasks_bidi = 1;
+		se_cmd->t_tasks_bidi = 1;
 	/*
 	 * Locate the struct se_lun pointer and attach it to struct se_cmd
 	 */
@@ -169,7 +169,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
 	 * For BIDI commands, pass in the extra READ buffer
 	 * to transport_generic_map_mem_to_cmd() below..
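	 * The shape of that call for the BIDI case, sketched with the
	 * standard Linux/SCSI buffer accessors (editor's illustration,
	 * not the literal driver code):
	 *
	 *	struct scsi_data_buffer *sdb_in = scsi_in(sc);
	 *
	 *	transport_generic_map_mem_to_cmd(se_cmd,
	 *			scsi_sglist(sc), scsi_sg_count(sc),
	 *			sdb_in->table.sgl, sdb_in->table.nents);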
*/ - if (se_cmd->t_task.t_tasks_bidi) { + if (se_cmd->t_tasks_bidi) { struct scsi_data_buffer *sdb = scsi_in(sc); sgl_bidi = sdb->table.sgl; @@ -1423,13 +1423,6 @@ static int tcm_loop_register_configfs(void) fabric->tf_ops.tpg_release_fabric_acl = &tcm_loop_tpg_release_fabric_acl; fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index; - /* - * Since tcm_loop is mapping physical memory from Linux/SCSI - * struct scatterlist arrays for each struct scsi_cmnd I/O, - * we do not need TCM to allocate a iovec array for - * virtual memory address mappings - */ - fabric->tf_ops.alloc_cmd_iovecs = NULL; /* * Used for setting up remaining TCM resources in process context */ diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 76abd86b6a73..76d506fe99e0 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -65,7 +65,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) struct se_port *port; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first Target port group descriptor */ @@ -157,7 +157,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) struct se_node_acl *nacl = cmd->se_sess->se_node_acl; struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */ u32 len = 4; /* Skip over RESERVED area in header */ int alua_access_state, primary = 0, rc; diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 8d5a0fc3a220..09ef3f811567 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -66,7 +66,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task.t_task_buf; + unsigned char *buf = cmd->t_task_buf; /* * Make sure we at least have 6 bytes of INQUIRY response @@ -621,8 +621,8 @@ static int target_emulate_inquiry(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task.t_task_buf; - unsigned char *cdb = cmd->t_task.t_task_cdb; + unsigned char *buf = cmd->t_task_buf; + unsigned char *cdb = cmd->t_task_cdb; if (!(cdb[1] & 0x1)) return target_emulate_inquiry_std(cmd); @@ -666,7 +666,7 @@ static int target_emulate_readcapacity(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task.t_task_buf; + unsigned char *buf = cmd->t_task_buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); u32 blocks; @@ -696,7 +696,7 @@ static int target_emulate_readcapacity_16(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task.t_task_buf; + unsigned char *buf = cmd->t_task_buf; unsigned long long blocks = dev->transport->get_blocks(dev); buf[0] = (blocks >> 56) & 0xff; @@ -831,8 +831,8 @@ static int target_emulate_modesense(struct se_cmd *cmd, int ten) { struct se_device *dev = cmd->se_dev; - char *cdb = cmd->t_task.t_task_cdb; - unsigned char *rbuf = cmd->t_task.t_task_buf; + char *cdb = cmd->t_task_cdb; + unsigned char *rbuf = cmd->t_task_buf; int type = 
dev->transport->get_device_type(dev); int offset = (ten) ? 8 : 4; int length = 0; @@ -903,8 +903,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) static int target_emulate_request_sense(struct se_cmd *cmd) { - unsigned char *cdb = cmd->t_task.t_task_cdb; - unsigned char *buf = cmd->t_task.t_task_buf; + unsigned char *cdb = cmd->t_task_cdb; + unsigned char *buf = cmd->t_task_buf; u8 ua_asc = 0, ua_ascq = 0; if (cdb[1] & 0x01) { @@ -965,8 +965,8 @@ target_emulate_unmap(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task.t_task_buf, *ptr = NULL; - unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; + unsigned char *buf = cmd->t_task_buf, *ptr = NULL; + unsigned char *cdb = &cmd->t_task_cdb[0]; sector_t lba; unsigned int size = cmd->data_length, range; int ret, offset; @@ -1012,7 +1012,8 @@ target_emulate_write_same(struct se_task *task, int write_same32) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; - sector_t range, lba = cmd->t_task.t_task_lba; + sector_t range; + sector_t lba = cmd->t_task_lba; unsigned int num_blocks; int ret; /* @@ -1021,9 +1022,9 @@ target_emulate_write_same(struct se_task *task, int write_same32) * range based on ->get_blocks() - starting LBA. */ if (write_same32) - num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[28]); + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); else - num_blocks = get_unaligned_be32(&cmd->t_task.t_task_cdb[10]); + num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); if (num_blocks != 0) range = num_blocks; @@ -1052,7 +1053,7 @@ transport_emulate_control_cdb(struct se_task *task) unsigned short service_action; int ret = 0; - switch (cmd->t_task.t_task_cdb[0]) { + switch (cmd->t_task_cdb[0]) { case INQUIRY: ret = target_emulate_inquiry(cmd); break; @@ -1066,13 +1067,13 @@ transport_emulate_control_cdb(struct se_task *task) ret = target_emulate_modesense(cmd, 1); break; case SERVICE_ACTION_IN: - switch (cmd->t_task.t_task_cdb[1] & 0x1f) { + switch (cmd->t_task_cdb[1] & 0x1f) { case SAI_READ_CAPACITY_16: ret = target_emulate_readcapacity_16(cmd); break; default: printk(KERN_ERR "Unsupported SA: 0x%02x\n", - cmd->t_task.t_task_cdb[1] & 0x1f); + cmd->t_task_cdb[1] & 0x1f); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } break; @@ -1097,7 +1098,7 @@ transport_emulate_control_cdb(struct se_task *task) break; case VARIABLE_LENGTH_CMD: service_action = - get_unaligned_be16(&cmd->t_task.t_task_cdb[8]); + get_unaligned_be16(&cmd->t_task_cdb[8]); switch (service_action) { case WRITE_SAME_32: if (!dev->transport->do_discard) { @@ -1136,7 +1137,7 @@ transport_emulate_control_cdb(struct se_task *task) break; default: printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", - cmd->t_task.t_task_cdb[0], dev->transport->name); + cmd->t_task_cdb[0], dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index ea92f75d215e..c674a5d74218 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -168,7 +168,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) */ spin_lock_irqsave(&se_lun->lun_cmd_lock, flags); list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list); - atomic_set(&se_cmd->t_task.transport_lun_active, 1); + atomic_set(&se_cmd->transport_lun_active, 1); spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags); return 0; @@ -656,10 +656,10 @@ int 
transport_core_report_lun_response(struct se_cmd *se_cmd) struct se_lun *se_lun; struct se_session *se_sess = se_cmd->se_sess; struct se_task *se_task; - unsigned char *buf = se_cmd->t_task.t_task_buf; + unsigned char *buf = se_cmd->t_task_buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; - list_for_each_entry(se_task, &se_cmd->t_task.t_task_list, t_list) + list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) break; if (!(se_task)) { diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 2e7ea7457501..5c47f4202386 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -377,7 +377,7 @@ static void fd_emulate_sync_cache(struct se_task *task) struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; struct fd_dev *fd_dev = dev->dev_ptr; - int immed = (cmd->t_task.t_task_cdb[1] & 0x2); + int immed = (cmd->t_task_cdb[1] & 0x2); loff_t start, end; int ret; @@ -391,11 +391,11 @@ static void fd_emulate_sync_cache(struct se_task *task) /* * Determine if we will be flushing the entire device. */ - if (cmd->t_task.t_task_lba == 0 && cmd->data_length == 0) { + if (cmd->t_task_lba == 0 && cmd->data_length == 0) { start = 0; end = LLONG_MAX; } else { - start = cmd->t_task.t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; + start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size; if (cmd->data_length) end = start + cmd->data_length; else @@ -475,7 +475,7 @@ static int fd_do_task(struct se_task *task) if (ret > 0 && dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 && dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - cmd->t_task.t_tasks_fua) { + cmd->t_tasks_fua) { /* * We might need to be a bit smarter here * and return some sense data to let the initiator diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c73baefeab8e..814a85b954f0 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -331,7 +331,7 @@ static void iblock_emulate_sync_cache(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; - int immed = (cmd->t_task.t_task_cdb[1] & 0x2); + int immed = (cmd->t_task_cdb[1] & 0x2); sector_t error_sector; int ret; @@ -400,7 +400,7 @@ static int iblock_do_task(struct se_task *task) */ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && - task->task_se_cmd->t_task.t_tasks_fua)) + task->task_se_cmd->t_tasks_fua)) rw = WRITE_FUA; else rw = WRITE; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 19406a3474c3..4fdede8da0c6 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -157,8 +157,8 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; - if ((cmd->t_task.t_task_cdb[1] & 0x01) && - (cmd->t_task.t_task_cdb[1] & 0x02)) { + if ((cmd->t_task_cdb[1] & 0x01) && + (cmd->t_task_cdb[1] & 0x02)) { printk(KERN_ERR "LongIO and Obselete Bits set, returning" " ILLEGAL_REQUEST\n"); return PYX_TRANSPORT_ILLEGAL_REQUEST; @@ -216,7 +216,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; struct t10_reservation *pr_tmpl = &su_dev->t10_pr; - unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; + unsigned char *cdb = &cmd->t_task_cdb[0]; int 
crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); int conflict = 0; @@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port( struct list_head tid_dest_list; struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; struct target_core_fabric_ops *tmp_tf_ops; - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tpdl, tid_len = 0; @@ -3307,7 +3307,7 @@ static int core_scsi3_emulate_pro_register_and_move( struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; unsigned char *initiator_str; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tid_len, tmp_tid_len; @@ -3723,7 +3723,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) */ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) { - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; u64 res_key, sa_res_key; int sa, scope, type, aptpl; int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; @@ -3830,7 +3830,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) struct se_device *se_dev = cmd->se_dev; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; u32 add_len = 0, off = 8; if (cmd->data_length < 8) { @@ -3885,7 +3885,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) struct se_device *se_dev = cmd->se_dev; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; u64 pr_res_key; u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ @@ -3965,7 +3965,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; u16 add_len = 8; /* Hardcoded to 8. 
*/ if (cmd->data_length < 6) { @@ -4020,7 +4020,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_tmp; struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task.t_task_buf; + unsigned char *buf = (unsigned char *)cmd->t_task_buf; u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; u32 off = 8; /* off into first Full Status descriptor */ int format_code = 0; @@ -4174,7 +4174,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) int core_scsi3_emulate_pr(struct se_cmd *cmd) { - unsigned char *cdb = &cmd->t_task.t_task_cdb[0]; + unsigned char *cdb = &cmd->t_task_cdb[0]; struct se_device *dev = cmd->se_dev; /* * Following spc2r20 5.5.1 Reservations overview: diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index ecfe889cb0ce..3574c520a5f4 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -328,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list( q = sd->request_queue; limits = &dev_limits.limits; limits->logical_block_size = sd->sector_size; - limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ? - queue_max_hw_sectors(q) : sd->host->max_sectors; - limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ? - queue_max_sectors(q) : sd->host->max_sectors; + limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); + limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q)); dev_limits.hw_queue_depth = sd->queue_depth; dev_limits.queue_depth = sd->queue_depth; /* @@ -697,7 +695,7 @@ static int pscsi_transport_complete(struct se_task *task) if (task->task_se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = task->task_se_cmd->t_task.t_task_buf; + unsigned char *buf = task->task_se_cmd->t_task_buf; if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -763,7 +761,7 @@ static struct se_task * pscsi_alloc_task(struct se_cmd *cmd) { struct pscsi_plugin_task *pt; - unsigned char *cdb = cmd->t_task.t_task_cdb; + unsigned char *cdb = cmd->t_task_cdb; pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); if (!pt) { @@ -776,7 +774,7 @@ pscsi_alloc_task(struct se_cmd *cmd) * allocate the extended CDB buffer for per struct se_task context * pt->pscsi_cdb now. */ - if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) { + if (cmd->t_task_cdb != cmd->__t_task_cdb) { pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); if (!(pt->pscsi_cdb)) { @@ -889,7 +887,7 @@ static void pscsi_free_task(struct se_task *task) * Release the extended CDB allocation from pscsi_alloc_task() * if one exists. 
*/ - if (cmd->t_task.t_task_cdb != cmd->t_task.__t_task_cdb) + if (cmd->t_task_cdb != cmd->__t_task_cdb) kfree(pt->pscsi_cdb); /* * We do not release the bio(s) here associated with this task, as @@ -1053,7 +1051,7 @@ static void pscsi_bi_endio(struct bio *bio, int error) bio_put(bio); } -static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num) +static inline struct bio *pscsi_get_bio(int sg_num) { struct bio *bio; /* @@ -1126,7 +1124,7 @@ static int __pscsi_map_task_SG( /* * Calls bio_kmalloc() and sets bio->bi_end_io() */ - bio = pscsi_get_bio(pdv, nr_vecs); + bio = pscsi_get_bio(nr_vecs); if (!(bio)) goto fail; @@ -1266,7 +1264,7 @@ static int pscsi_map_task_non_SG(struct se_task *task) return 0; ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, - pt->pscsi_req, cmd->t_task.t_task_buf, + pt->pscsi_req, cmd->t_task_buf, task->task_size, GFP_KERNEL); if (ret < 0) { printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 384a8e2083e3..4f9416d5c028 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -737,7 +737,7 @@ check_eot: } out: - task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; #ifdef DEBUG_RAMDISK_DR printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", *se_mem_cnt); @@ -819,7 +819,7 @@ static int rd_DIRECT_without_offset( } out: - task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; #ifdef DEBUG_RAMDISK_DR printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", *se_mem_cnt); @@ -880,14 +880,14 @@ static int rd_DIRECT_do_se_mem_map( * across multiple struct se_task->task_sg[]. */ ret = transport_init_task_sg(task, - list_first_entry(&cmd->t_task.t_mem_list, + list_first_entry(&cmd->t_mem_list, struct se_mem, se_list), task_offset); if (ret <= 0) return ret; return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - list_first_entry(&cmd->t_task.t_mem_list, + list_first_entry(&cmd->t_mem_list, struct se_mem, se_list), out_se_mem, se_mem_cnt, task_offset_in); } diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index e1f99f75ac35..6667e39a35a1 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -178,14 +178,14 @@ int core_tmr_lun_reset( continue; spin_unlock(&dev->se_tmr_lock); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - if (!(atomic_read(&cmd->t_task.t_transport_active))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!(atomic_read(&cmd->t_transport_active))) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_lock(&dev->se_tmr_lock); continue; } if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_lock(&dev->se_tmr_lock); continue; } @@ -193,7 +193,7 @@ int core_tmr_lun_reset( " Response: 0x%02x, t_state: %d\n", (preempt_and_abort_list) ? 
"Preempt" : "", tmr_p, tmr_p->function, tmr_p->response, cmd->t_state); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_cmd_finish_abort_tmr(cmd); spin_lock(&dev->se_tmr_lock); @@ -247,38 +247,38 @@ int core_tmr_lun_reset( atomic_set(&task->task_state_active, 0); spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" "def_t_state: %d/%d cdb: 0x%02x\n", (preempt_and_abort_list) ? "Preempt" : "", cmd, task, cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, - cmd->deferred_t_state, cmd->t_task.t_task_cdb[0]); + cmd->deferred_t_state, cmd->t_task_cdb[0]); DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" " t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key, - cmd->t_task.t_task_cdbs, - atomic_read(&cmd->t_task.t_task_cdbs_left), - atomic_read(&cmd->t_task.t_task_cdbs_sent), - atomic_read(&cmd->t_task.t_transport_active), - atomic_read(&cmd->t_task.t_transport_stop), - atomic_read(&cmd->t_task.t_transport_sent)); + cmd->t_task_list_num, + atomic_read(&cmd->t_task_cdbs_left), + atomic_read(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), + atomic_read(&cmd->t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" " for dev: %p\n", task, dev); wait_for_completion(&task->task_stop_comp); DEBUG_LR("LUN_RESET Completed task: %p shutdown for" " dev: %p\n", task, dev); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - atomic_dec(&cmd->t_task.t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_dec(&cmd->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -288,24 +288,24 @@ int core_tmr_lun_reset( } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) { + if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&cmd->t_task.t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - fe_count = atomic_read(&cmd->t_task.t_fe_count); + fe_count = atomic_read(&cmd->t_fe_count); - if (atomic_read(&cmd->t_task.t_transport_active)) { + if (atomic_read(&cmd->t_transport_active)) { DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&cmd->t_task.t_transport_aborted, 1); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, + atomic_set(&cmd->t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); @@ -314,8 +314,8 @@ int core_tmr_lun_reset( } DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); - atomic_set(&cmd->t_task.t_transport_aborted, 1); - 
spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + atomic_set(&cmd->t_transport_aborted, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -345,7 +345,7 @@ int core_tmr_lun_reset( if (prout_cmd == cmd) continue; - atomic_dec(&cmd->t_task.t_transport_queue_active); + atomic_dec(&cmd->t_transport_queue_active); atomic_dec(&qobj->queue_cnt); list_del(&cmd->se_queue_node); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); @@ -353,7 +353,7 @@ int core_tmr_lun_reset( DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" " %d t_fe_count: %d\n", (preempt_and_abort_list) ? "Preempt" : "", cmd, cmd->t_state, - atomic_read(&cmd->t_task.t_fe_count)); + atomic_read(&cmd->t_fe_count)); /* * Signal that the command has failed via cmd->se_cmd_flags, * and call TFO->new_cmd_failure() to wakeup any fabric @@ -365,7 +365,7 @@ int core_tmr_lun_reset( transport_new_cmd_failure(cmd); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, - atomic_read(&cmd->t_task.t_fe_count)); + atomic_read(&cmd->t_fe_count)); spin_lock_irqsave(&qobj->cmd_queue_lock, flags); } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index bf401da05f35..6f2855dac7f8 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -206,20 +206,18 @@ static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); static void transport_direct_request_timeout(struct se_cmd *cmd); static void transport_free_dev_tasks(struct se_cmd *cmd); -static u32 transport_generic_get_cdb_count(struct se_cmd *cmd, +static u32 transport_allocate_tasks(struct se_cmd *cmd, unsigned long long starting_lba, u32 sectors, enum dma_data_direction data_direction, struct list_head *mem_list, int set_counts); -static int transport_generic_get_mem(struct se_cmd *cmd, u32 length, - u32 dma_size); +static int transport_generic_get_mem(struct se_cmd *cmd, u32 length); static int transport_generic_remove(struct se_cmd *cmd, int release_to_pool, int session_reinstatement); -static int transport_get_sectors(struct se_cmd *cmd); +static int transport_cmd_get_valid_sectors(struct se_cmd *cmd); static int transport_map_sg_to_mem(struct se_cmd *cmd, - struct list_head *se_mem_list, struct scatterlist *sgl, - u32 *se_mem_cnt); -static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd, - unsigned char *dst, struct list_head *se_mem_list); + struct list_head *se_mem_list, struct scatterlist *sgl); +static void transport_memcpy_se_mem_read_contig(unsigned char *dst, + struct list_head *se_mem_list, u32 len); static void transport_release_fe_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj); @@ -573,7 +571,7 @@ void transport_deregister_session(struct se_session *se_sess) EXPORT_SYMBOL(transport_deregister_session); /* - * Called with cmd->t_task.t_state_lock held. + * Called with cmd->t_state_lock held. 
*/ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) { @@ -581,7 +579,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task_list, t_list) { dev = task->se_dev; if (!(dev)) continue; @@ -599,7 +597,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) spin_unlock_irqrestore(&dev->execute_task_lock, flags); atomic_set(&task->task_state_active, 0); - atomic_dec(&cmd->t_task.t_task_cdbs_ex_left); + atomic_dec(&cmd->t_task_cdbs_ex_left); } } @@ -618,32 +616,32 @@ static int transport_cmd_check_stop( { unsigned long flags; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if IOCTL context caller in requesting the stopping of this * command for LUN shutdown purposes. */ - if (atomic_read(&cmd->t_task.transport_lun_stop)) { - DEBUG_CS("%s:%d atomic_read(&cmd->t_task.transport_lun_stop)" + if (atomic_read(&cmd->transport_lun_stop)) { + DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)" " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); cmd->deferred_t_state = cmd->t_state; cmd->t_state = TRANSPORT_DEFERRED_CMD; - atomic_set(&cmd->t_task.t_transport_active, 0); + atomic_set(&cmd->t_transport_active, 0); if (transport_off == 2) transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_task.transport_lun_stop_comp); + complete(&cmd->transport_lun_stop_comp); return 1; } /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. 
*/ - if (atomic_read(&cmd->t_task.t_transport_stop)) { - DEBUG_CS("%s:%d atomic_read(&cmd->t_task.t_transport_stop) ==" + if (atomic_read(&cmd->t_transport_stop)) { + DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) ==" " TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); @@ -658,13 +656,13 @@ static int transport_cmd_check_stop( */ if (transport_off == 2) cmd->se_lun = NULL; - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->t_task.t_transport_stop_comp); + complete(&cmd->t_transport_stop_comp); return 1; } if (transport_off) { - atomic_set(&cmd->t_task.t_transport_active, 0); + atomic_set(&cmd->t_transport_active, 0); if (transport_off == 2) { transport_all_task_dev_remove_state(cmd); /* @@ -679,18 +677,18 @@ static int transport_cmd_check_stop( */ if (cmd->se_tfo->check_stop_free != NULL) { spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); cmd->se_tfo->check_stop_free(cmd); return 1; } } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } else if (t_state) cmd->t_state = t_state; - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } @@ -708,21 +706,21 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) if (!lun) return; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - if (!(atomic_read(&cmd->t_task.transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!(atomic_read(&cmd->transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto check_lun; } - atomic_set(&cmd->t_task.transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); check_lun: spin_lock_irqsave(&lun->lun_cmd_lock, flags); - if (atomic_read(&cmd->t_task.transport_lun_active)) { + if (atomic_read(&cmd->transport_lun_active)) { list_del(&cmd->se_lun_node); - atomic_set(&cmd->t_task.transport_lun_active, 0); + atomic_set(&cmd->transport_lun_active, 0); #if 0 printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); @@ -763,15 +761,15 @@ static void transport_add_cmd_to_queue( INIT_LIST_HEAD(&cmd->se_queue_node); if (t_state) { - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = t_state; - atomic_set(&cmd->t_task.t_transport_active, 1); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + atomic_set(&cmd->t_transport_active, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); - atomic_inc(&cmd->t_task.t_transport_queue_active); + atomic_inc(&cmd->t_transport_queue_active); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); atomic_inc(&qobj->queue_cnt); @@ -791,7 +789,7 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj) } cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); - atomic_dec(&cmd->t_task.t_transport_queue_active); + atomic_dec(&cmd->t_transport_queue_active); list_del(&cmd->se_queue_node); atomic_dec(&qobj->queue_cnt); @@ -807,24 +805,24 @@ static void 
transport_remove_cmd_from_queue(struct se_cmd *cmd, unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (!(atomic_read(&cmd->t_task.t_transport_queue_active))) { + if (!(atomic_read(&cmd->t_transport_queue_active))) { spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } list_for_each_entry(t, &qobj->qobj_list, se_queue_node) if (t == cmd) { - atomic_dec(&cmd->t_task.t_transport_queue_active); + atomic_dec(&cmd->t_transport_queue_active); atomic_dec(&qobj->queue_cnt); list_del(&cmd->se_queue_node); break; } spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - if (atomic_read(&cmd->t_task.t_transport_queue_active)) { + if (atomic_read(&cmd->t_transport_queue_active)) { printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", cmd->se_tfo->get_task_tag(cmd), - atomic_read(&cmd->t_task.t_transport_queue_active)); + atomic_read(&cmd->t_transport_queue_active)); } } @@ -834,7 +832,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, */ void transport_complete_sync_cache(struct se_cmd *cmd, int good) { - struct se_task *task = list_entry(cmd->t_task.t_task_list.next, + struct se_task *task = list_entry(cmd->t_task_list.next, struct se_task, t_list); if (good) { @@ -864,12 +862,12 @@ void transport_complete_task(struct se_task *task, int success) unsigned long flags; #if 0 printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, - cmd->t_task.t_task_cdb[0], dev); + cmd->t_task_cdb[0], dev); #endif if (dev) atomic_inc(&dev->depth_left); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_set(&task->task_active, 0); /* @@ -891,14 +889,14 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_stop)) { /* - * Decrement cmd->t_task.t_se_count if this task had + * Decrement cmd->t_se_count if this task had * previously thrown its timeout exception handler. */ if (atomic_read(&task->task_timeout)) { - atomic_dec(&cmd->t_task.t_se_count); + atomic_dec(&cmd->t_se_count); atomic_set(&task->task_timeout, 0); } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&task->task_stop_comp); return; @@ -910,33 +908,33 @@ void transport_complete_task(struct se_task *task, int success) */ if (atomic_read(&task->task_timeout)) { if (!(atomic_dec_and_test( - &cmd->t_task.t_task_cdbs_timeout_left))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, + &cmd->t_task_cdbs_timeout_left))) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } t_state = TRANSPORT_COMPLETE_TIMEOUT; - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); return; } - atomic_dec(&cmd->t_task.t_task_cdbs_timeout_left); + atomic_dec(&cmd->t_task_cdbs_timeout_left); /* * Decrement the outstanding t_task_cdbs_left count. The last * struct se_task from struct se_cmd will complete itself into the * device queue depending upon int success. 
*/ - if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) { + if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { if (!success) - cmd->t_task.t_tasks_failed = 1; + cmd->t_tasks_failed = 1; - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - if (!success || cmd->t_task.t_tasks_failed) { + if (!success || cmd->t_tasks_failed) { t_state = TRANSPORT_COMPLETE_FAILURE; if (!task->task_error_status) { task->task_error_status = @@ -945,10 +943,10 @@ void transport_complete_task(struct se_task *task, int success) PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } } else { - atomic_set(&cmd->t_task.t_transport_complete, 1); + atomic_set(&cmd->t_transport_complete, 1); t_state = TRANSPORT_COMPLETE_OK; } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, t_state); } @@ -1041,8 +1039,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) struct se_task *task; unsigned long flags; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_for_each_entry(task, &cmd->t_task_list, t_list) { dev = task->se_dev; if (atomic_read(&task->task_state_active)) @@ -1058,7 +1056,7 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) spin_unlock(&dev->execute_task_lock); } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static void transport_add_tasks_from_cmd(struct se_cmd *cmd) @@ -1068,7 +1066,7 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd) unsigned long flags; spin_lock_irqsave(&dev->execute_task_lock, flags); - list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task_list, t_list) { if (atomic_read(&task->task_execute_queue)) continue; /* @@ -1670,14 +1668,13 @@ transport_generic_get_task(struct se_cmd *cmd, INIT_LIST_HEAD(&task->t_execute_list); INIT_LIST_HEAD(&task->t_state_list); init_completion(&task->task_stop_comp); - task->task_no = cmd->t_task.t_tasks_no++; task->task_se_cmd = cmd; task->se_dev = dev; task->task_data_direction = data_direction; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task.t_task_list); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task_list); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return task; } @@ -1701,14 +1698,14 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_ordered_node); - INIT_LIST_HEAD(&cmd->t_task.t_mem_list); - INIT_LIST_HEAD(&cmd->t_task.t_mem_bidi_list); - INIT_LIST_HEAD(&cmd->t_task.t_task_list); - init_completion(&cmd->t_task.transport_lun_fe_stop_comp); - init_completion(&cmd->t_task.transport_lun_stop_comp); - init_completion(&cmd->t_task.t_transport_stop_comp); - spin_lock_init(&cmd->t_task.t_state_lock); - atomic_set(&cmd->t_task.transport_dev_active, 1); + INIT_LIST_HEAD(&cmd->t_mem_list); + INIT_LIST_HEAD(&cmd->t_mem_bidi_list); + INIT_LIST_HEAD(&cmd->t_task_list); + init_completion(&cmd->transport_lun_fe_stop_comp); + init_completion(&cmd->transport_lun_stop_comp); + init_completion(&cmd->t_transport_stop_comp); + spin_lock_init(&cmd->t_state_lock); + atomic_set(&cmd->transport_dev_active, 1); cmd->se_tfo = tfo; 
cmd->se_sess = se_sess; @@ -1753,8 +1750,8 @@ void transport_free_se_cmd( /* * Check and free any extended CDB buffer that was allocated */ - if (se_cmd->t_task.t_task_cdb != se_cmd->t_task.__t_task_cdb) - kfree(se_cmd->t_task.t_task_cdb); + if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) + kfree(se_cmd->t_task_cdb); } EXPORT_SYMBOL(transport_free_se_cmd); @@ -1792,26 +1789,26 @@ int transport_generic_allocate_tasks( * allocate the additional extended CDB buffer now.. Otherwise * setup the pointer from __t_task_cdb to t_task_cdb. */ - if (scsi_command_size(cdb) > sizeof(cmd->t_task.__t_task_cdb)) { - cmd->t_task.t_task_cdb = kzalloc(scsi_command_size(cdb), + if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { + cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); - if (!(cmd->t_task.t_task_cdb)) { - printk(KERN_ERR "Unable to allocate cmd->t_task.t_task_cdb" - " %u > sizeof(cmd->t_task.__t_task_cdb): %lu ops\n", + if (!(cmd->t_task_cdb)) { + printk(KERN_ERR "Unable to allocate cmd->t_task_cdb" + " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), - (unsigned long)sizeof(cmd->t_task.__t_task_cdb)); + (unsigned long)sizeof(cmd->__t_task_cdb)); return -ENOMEM; } } else - cmd->t_task.t_task_cdb = &cmd->t_task.__t_task_cdb[0]; + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; /* - * Copy the original CDB into cmd->t_task. + * Copy the original CDB into cmd-> */ - memcpy(cmd->t_task.t_task_cdb, cdb, scsi_command_size(cdb)); + memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); /* * Setup the received CDB based on SCSI defined opcodes and * perform unit attention, persistent reservations and ALUA - * checks for virtual device backends. The cmd->t_task.t_task_cdb + * checks for virtual device backends. The cmd->t_task_cdb * pointer is expected to be setup before we reach this point. 
*/ ret = transport_generic_cmd_sequencer(cmd, cdb); @@ -1845,7 +1842,6 @@ int transport_generic_handle_cdb( printk(KERN_ERR "cmd->se_lun is NULL\n"); return -EINVAL; } - transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); return 0; } @@ -1936,9 +1932,9 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) /* * No tasks remain in the execution queue */ - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task.t_task_list, t_list) { + &cmd->t_task_list, t_list) { DEBUG_TS("task_no[%d] - Processing task %p\n", task->task_no, task); /* @@ -1947,14 +1943,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (!atomic_read(&task->task_sent) && !atomic_read(&task->task_active)) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_remove_task_from_execute_queue(task, task->se_dev); DEBUG_TS("task_no[%d] - Removed from execute queue\n", task->task_no); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); continue; } @@ -1964,7 +1960,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) */ if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); DEBUG_TS("task_no[%d] - Waiting to complete\n", @@ -1973,8 +1969,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) DEBUG_TS("task_no[%d] - Stopped successfully\n", task->task_no); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - atomic_dec(&cmd->t_task.t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_dec(&cmd->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -1985,7 +1981,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) __transport_stop_task_timer(task, &flags); } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return ret; } @@ -2001,7 +1997,7 @@ static void transport_generic_request_failure( { DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), - cmd->t_task.t_task_cdb[0]); + cmd->t_task_cdb[0]); DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" " %d/%d transport_error_status: %d\n", cmd->se_tfo->get_cmd_state(cmd), @@ -2010,13 +2006,13 @@ static void transport_generic_request_failure( DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" " t_transport_active: %d t_transport_stop: %d" - " t_transport_sent: %d\n", cmd->t_task.t_task_cdbs, - atomic_read(&cmd->t_task.t_task_cdbs_left), - atomic_read(&cmd->t_task.t_task_cdbs_sent), - atomic_read(&cmd->t_task.t_task_cdbs_ex_left), - atomic_read(&cmd->t_task.t_transport_active), - atomic_read(&cmd->t_task.t_transport_stop), - atomic_read(&cmd->t_task.t_transport_sent)); + " t_transport_sent: %d\n", cmd->t_task_cdbs, + atomic_read(&cmd->t_task_cdbs_left), + atomic_read(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_task_cdbs_ex_left), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), + atomic_read(&cmd->t_transport_sent)); transport_stop_all_task_timers(cmd); @@ -2098,7 +2094,7 @@ static void transport_generic_request_failure( break; default: printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", - 
cmd->t_task.t_task_cdb[0], + cmd->t_task_cdb[0], cmd->transport_error_status); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; @@ -2119,19 +2115,19 @@ static void transport_direct_request_timeout(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - if (!(atomic_read(&cmd->t_task.t_transport_timeout))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!(atomic_read(&cmd->t_transport_timeout))) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - if (atomic_read(&cmd->t_task.t_task_cdbs_timeout_left)) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - atomic_sub(atomic_read(&cmd->t_task.t_transport_timeout), - &cmd->t_task.t_se_count); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + atomic_sub(atomic_read(&cmd->t_transport_timeout), + &cmd->t_se_count); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static void transport_generic_request_timeout(struct se_cmd *cmd) @@ -2139,16 +2135,16 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) unsigned long flags; /* - * Reset cmd->t_task.t_se_count to allow transport_generic_remove() + * Reset cmd->t_se_count to allow transport_generic_remove() * to allow last call to free memory resources. */ - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - if (atomic_read(&cmd->t_task.t_transport_timeout) > 1) { - int tmp = (atomic_read(&cmd->t_task.t_transport_timeout) - 1); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_transport_timeout) > 1) { + int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); - atomic_sub(tmp, &cmd->t_task.t_se_count); + atomic_sub(tmp, &cmd->t_se_count); } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_generic_remove(cmd, 0, 0); } @@ -2164,8 +2160,8 @@ transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) return -ENOMEM; } - cmd->t_task.t_tasks_se_num = 0; - cmd->t_task.t_task_buf = buf; + cmd->t_tasks_se_num = 0; + cmd->t_task_buf = buf; return 0; } @@ -2207,9 +2203,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd) { unsigned long flags; - spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; - spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); } /* @@ -2223,9 +2219,9 @@ static void transport_task_timeout_handler(unsigned long data) DEBUG_TT("transport task timeout fired! 
task: %p cmd: %p\n", task, cmd); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (task->task_flags & TF_STOP) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } task->task_flags &= ~TF_RUNNING; @@ -2236,13 +2232,13 @@ static void transport_task_timeout_handler(unsigned long data) if (!(atomic_read(&task->task_active))) { DEBUG_TT("transport task: %p cmd: %p timeout task_active" " == 0\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - atomic_inc(&cmd->t_task.t_se_count); - atomic_inc(&cmd->t_task.t_transport_timeout); - cmd->t_task.t_tasks_failed = 1; + atomic_inc(&cmd->t_se_count); + atomic_inc(&cmd->t_transport_timeout); + cmd->t_tasks_failed = 1; atomic_set(&task->task_timeout, 1); task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; @@ -2251,28 +2247,28 @@ static void transport_task_timeout_handler(unsigned long data) if (atomic_read(&task->task_stop)) { DEBUG_TT("transport task: %p cmd: %p timeout task_stop" " == 1\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&task->task_stop_comp); return; } - if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_left))) { + if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { DEBUG_TT("transport task: %p cmd: %p timeout non zero" " t_task_cdbs_left\n", task, cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", task, cmd); cmd->t_state = TRANSPORT_COMPLETE_FAILURE; - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); } /* - * Called with cmd->t_task.t_state_lock held. + * Called with cmd->t_state_lock held. */ static void transport_start_task_timer(struct se_task *task) { @@ -2302,7 +2298,7 @@ static void transport_start_task_timer(struct se_task *task) } /* - * Called with spin_lock_irq(&cmd->t_task.t_state_lock) held. + * Called with spin_lock_irq(&cmd->t_state_lock) held. 
*/ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) { @@ -2312,11 +2308,11 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) return; task->task_flags |= TF_STOP; - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, *flags); + spin_unlock_irqrestore(&cmd->t_state_lock, *flags); del_timer_sync(&task->task_timer); - spin_lock_irqsave(&cmd->t_task.t_state_lock, *flags); + spin_lock_irqsave(&cmd->t_state_lock, *flags); task->task_flags &= ~TF_RUNNING; task->task_flags &= ~TF_STOP; } @@ -2326,11 +2322,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd) struct se_task *task = NULL, *task_tmp; unsigned long flags; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task.t_task_list, t_list) + &cmd->t_task_list, t_list) __transport_stop_task_timer(task, &flags); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static inline int transport_tcq_window_closed(struct se_device *dev) @@ -2365,7 +2361,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) smp_mb__after_atomic_inc(); DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" " 0x%02x, se_ordered_id: %u\n", - cmd->t_task->t_task_cdb[0], + cmd->t_task_cdb[0], cmd->se_ordered_id); return 1; } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { @@ -2379,7 +2375,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" " list, se_ordered_id: %u\n", - cmd->t_task.t_task_cdb[0], + cmd->t_task_cdb[0], cmd->se_ordered_id); /* * Add ORDERED command to tail of execution queue if @@ -2413,7 +2409,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" " delayed CMD list, se_ordered_id: %u\n", - cmd->t_task.t_task_cdb[0], cmd->sam_task_attr, + cmd->t_task_cdb[0], cmd->sam_task_attr, cmd->se_ordered_id); /* * Return zero to let transport_execute_tasks() know @@ -2487,7 +2483,7 @@ static int __transport_execute_tasks(struct se_device *dev) /* * Check if there is enough room in the device and HBA queue to send - * struct se_transport_task's to the selected transport. + * struct se_tasks to the selected transport.
*/ check_depth: if (!atomic_read(&dev->depth_left)) @@ -2511,17 +2507,17 @@ check_depth: cmd = task->task_se_cmd; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_set(&task->task_active, 1); atomic_set(&task->task_sent, 1); - atomic_inc(&cmd->t_task.t_task_cdbs_sent); + atomic_inc(&cmd->t_task_cdbs_sent); - if (atomic_read(&cmd->t_task.t_task_cdbs_sent) == - cmd->t_task.t_task_cdbs) + if (atomic_read(&cmd->t_task_cdbs_sent) == + cmd->t_task_list_num) atomic_set(&cmd->transport_sent, 1); transport_start_task_timer(task); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); /* * The struct se_cmd->transport_emulate_cdb() function pointer is used * to grab REPORT_LUNS and other CDBs we want to handle before they hit the @@ -2586,10 +2582,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) * Any unsolicited data will get dumped for failed command inside of * the fabric plugin */ - spin_lock_irqsave(&se_cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&se_cmd->t_state_lock, flags); se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - spin_unlock_irqrestore(&se_cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); se_cmd->se_tfo->new_cmd_failure(se_cmd); } @@ -2799,17 +2795,18 @@ static void transport_xor_callback(struct se_cmd *cmd) return; } /* - * Copy the scatterlist WRITE buffer located at cmd->t_task.t_mem_list + * Copy the scatterlist WRITE buffer located at cmd->t_mem_list * into the locally allocated *buf */ - transport_memcpy_se_mem_read_contig(cmd, buf, &cmd->t_task.t_mem_list); + transport_memcpy_se_mem_read_contig(buf, &cmd->t_mem_list, + cmd->data_length); /* * Now perform the XOR against the BIDI read memory located at - * cmd->t_task.t_mem_bidi_list + * cmd->t_mem_bidi_list */ offset = 0; - list_for_each_entry(se_mem, &cmd->t_task.t_mem_bidi_list, se_list) { + list_for_each_entry(se_mem, &cmd->t_mem_bidi_list, se_list) { addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); if (!(addr)) goto out; @@ -2837,14 +2834,14 @@ static int transport_get_sense_data(struct se_cmd *cmd) WARN_ON(!cmd->se_lun); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } list_for_each_entry_safe(task, task_tmp, - &cmd->t_task.t_task_list, t_list) { + &cmd->t_task_list, t_list) { if (!task->task_sense) continue; @@ -2866,7 +2863,7 @@ static int transport_get_sense_data(struct se_cmd *cmd) cmd->se_tfo->get_task_tag(cmd), task->task_no); continue; } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); @@ -2884,7 +2881,7 @@ static int transport_get_sense_data(struct se_cmd *cmd) cmd->scsi_status); return 0; } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return -1; } @@ -2895,7 +2892,7 @@ static int transport_allocate_resources(struct se_cmd *cmd) if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) - return transport_generic_get_mem(cmd, length, PAGE_SIZE); + return transport_generic_get_mem(cmd, length); else if 
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) return transport_generic_allocate_buf(cmd, length); else return 0; } @@ -2999,7 +2996,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - cmd->t_task.t_task_lba = transport_lba_21(cdb); + cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_10: @@ -3008,7 +3005,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task.t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_12: @@ -3017,7 +3014,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - cmd->t_task.t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case READ_16: @@ -3026,7 +3023,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - cmd->t_task.t_task_lba = transport_lba_64(cdb); + cmd->t_task_lba = transport_lba_64(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_6: @@ -3035,7 +3032,7 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_6; - cmd->t_task.t_task_lba = transport_lba_21(cdb); + cmd->t_task_lba = transport_lba_21(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_10: @@ -3044,8 +3041,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task.t_task_lba = transport_lba_32(cdb); - cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_12: @@ -3054,8 +3051,8 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_12; - cmd->t_task.t_task_lba = transport_lba_32(cdb); - cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_32(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case WRITE_16: @@ -3064,20 +3061,20 @@ static int transport_generic_cmd_sequencer( goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_16; - cmd->t_task.t_task_lba = transport_lba_64(cdb); - cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); + cmd->t_task_lba = transport_lba_64(cdb); + cmd->t_tasks_fua = (cdb[1] & 0x8); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; break; case XDWRITEREAD_10: if ((cmd->data_direction != DMA_TO_DEVICE) || - !(cmd->t_task.t_tasks_bidi)) + !(cmd->t_tasks_bidi)) goto out_invalid_cdb_field; sectors = transport_get_sectors_10(cdb, cmd, &sector_ret); if (sector_ret) goto out_unsupported_cdb; size = transport_get_size(sectors, cdb, cmd); cmd->transport_split_cdb = &split_cdb_XX_10; - cmd->t_task.t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |=
SCF_SCSI_DATA_SG_IO_CDB; passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); @@ -3090,7 +3087,7 @@ static int transport_generic_cmd_sequencer( * Setup BIDI XOR callback to be run during transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_task.t_tasks_fua = (cdb[1] & 0x8); + cmd->t_tasks_fua = (cdb[1] & 0x8); break; case VARIABLE_LENGTH_CMD: service_action = get_unaligned_be16(&cdb[8]); @@ -3112,7 +3109,7 @@ static int transport_generic_cmd_sequencer( * XDWRITE_READ_32 logic. */ cmd->transport_split_cdb = &split_cdb_XX_32; - cmd->t_task.t_task_lba = transport_lba_64_ext(cdb); + cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; /* @@ -3126,7 +3123,7 @@ static int transport_generic_cmd_sequencer( * transport_generic_complete_ok() */ cmd->transport_complete_callback = &transport_xor_callback; - cmd->t_task.t_tasks_fua = (cdb[10] & 0x8); + cmd->t_tasks_fua = (cdb[10] & 0x8); break; case WRITE_SAME_32: sectors = transport_get_sectors_32(cdb, cmd, §or_ret); @@ -3138,7 +3135,7 @@ static int transport_generic_cmd_sequencer( else size = dev->se_sub_dev->se_dev_attrib.block_size; - cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[12]); + cmd->t_task_lba = get_unaligned_be64(&cdb[12]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; /* @@ -3373,10 +3370,10 @@ static int transport_generic_cmd_sequencer( */ if (cdb[0] == SYNCHRONIZE_CACHE) { sectors = transport_get_sectors_10(cdb, cmd, §or_ret); - cmd->t_task.t_task_lba = transport_lba_32(cdb); + cmd->t_task_lba = transport_lba_32(cdb); } else { sectors = transport_get_sectors_16(cdb, cmd, §or_ret); - cmd->t_task.t_task_lba = transport_lba_64(cdb); + cmd->t_task_lba = transport_lba_64(cdb); } if (sector_ret) goto out_unsupported_cdb; @@ -3398,7 +3395,7 @@ static int transport_generic_cmd_sequencer( * Check to ensure that LBA + Range does not exceed past end of * device. */ - if (transport_get_sectors(cmd) < 0) + if (!transport_cmd_get_valid_sectors(cmd)) goto out_invalid_cdb_field; break; case UNMAP: @@ -3427,7 +3424,7 @@ static int transport_generic_cmd_sequencer( else size = dev->se_sub_dev->se_dev_attrib.block_size; - cmd->t_task.t_task_lba = get_unaligned_be64(&cdb[2]); + cmd->t_task_lba = get_unaligned_be16(&cdb[2]); passthrough = (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV); /* @@ -3542,88 +3539,22 @@ out_invalid_cdb_field: static inline void transport_release_tasks(struct se_cmd *); -/* - * This function will copy a contiguous *src buffer into a destination - * struct scatterlist array. - */ -static void transport_memcpy_write_contig( - struct se_cmd *cmd, - struct scatterlist *sg_d, - unsigned char *src) -{ - u32 i = 0, length = 0, total_length = cmd->data_length; - void *dst; - - while (total_length) { - length = sg_d[i].length; - - if (length > total_length) - length = total_length; - - dst = sg_virt(&sg_d[i]); - - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - - src += length; - i++; - } -} - -/* - * This function will copy a struct scatterlist array *sg_s into a destination - * contiguous *dst buffer. 
- */ -static void transport_memcpy_read_contig( - struct se_cmd *cmd, - unsigned char *dst, - struct scatterlist *sg_s) -{ - u32 i = 0, length = 0, total_length = cmd->data_length; - void *src; - - while (total_length) { - length = sg_s[i].length; - - if (length > total_length) - length = total_length; - - src = sg_virt(&sg_s[i]); - - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - - dst += length; - i++; - } -} - static void transport_memcpy_se_mem_read_contig( - struct se_cmd *cmd, unsigned char *dst, - struct list_head *se_mem_list) + struct list_head *se_mem_list, + u32 tot_len) { struct se_mem *se_mem; void *src; - u32 length = 0, total_length = cmd->data_length; + u32 length; list_for_each_entry(se_mem, se_mem_list, se_list) { - length = se_mem->se_len; - - if (length > total_length) - length = total_length; - + length = min_t(u32, se_mem->se_len, tot_len); src = page_address(se_mem->se_page) + se_mem->se_off; - memcpy(dst, src, length); - - if (!(total_length -= length)) - return; - + tot_len -= length; + if (!tot_len) + break; dst += length; } } @@ -3744,14 +3675,15 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) } spin_unlock(&cmd->se_lun->lun_sep_lock); /* - * If enabled by TCM fabirc module pre-registered SGL + * If enabled by TCM fabric module pre-registered SGL * memory, perform the memcpy() from the TCM internal - * contigious buffer back to the original SGL. + * contiguous buffer back to the original SGL. */ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) - transport_memcpy_write_contig(cmd, - cmd->t_task.t_task_pt_sgl, - cmd->t_task.t_task_buf); + sg_copy_from_buffer(cmd->t_task_pt_sgl, + cmd->t_task_pt_sgl_num, + cmd->t_task_buf, + cmd->data_length); cmd->se_tfo->queue_data_in(cmd); break; @@ -3765,7 +3697,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (!list_empty(&cmd->t_task.t_mem_bidi_list)) { + if (!list_empty(&cmd->t_mem_bidi_list)) { spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) { cmd->se_lun->lun_sep->sep_stats.tx_data_octets += @@ -3792,9 +3724,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) struct se_task *task, *task_tmp; unsigned long flags; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, - &cmd->t_task.t_task_list, t_list) { + &cmd->t_task_list, t_list) { if (atomic_read(&task->task_active)) continue; @@ -3803,15 +3735,15 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) list_del(&task->t_list); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (task->se_dev) task->se_dev->transport->free_task(task); else printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", task->task_no); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } static inline void transport_free_pages(struct se_cmd *cmd) @@ -3824,9 +3756,9 @@ static inline void transport_free_pages(struct se_cmd *cmd) if (cmd->se_dev->transport->do_se_mem_map) free_page = 0; - if (cmd->t_task.t_task_buf) { - kfree(cmd->t_task.t_task_buf); - cmd->t_task.t_task_buf = NULL; + if (cmd->t_task_buf) { + kfree(cmd->t_task_buf); + cmd->t_task_buf = NULL; return; } @@ -3837,7 +3769,7 @@ static inline void 
transport_free_pages(struct se_cmd *cmd) return; list_for_each_entry_safe(se_mem, se_mem_tmp, - &cmd->t_task.t_mem_list, se_list) { + &cmd->t_mem_list, se_list) { /* * We only release call __free_page(struct se_mem->se_page) when * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, @@ -3848,10 +3780,10 @@ static inline void transport_free_pages(struct se_cmd *cmd) list_del(&se_mem->se_list); kmem_cache_free(se_mem_cache, se_mem); } - cmd->t_task.t_tasks_se_num = 0; + cmd->t_tasks_se_num = 0; list_for_each_entry_safe(se_mem, se_mem_tmp, - &cmd->t_task.t_mem_bidi_list, se_list) { + &cmd->t_mem_bidi_list, se_list) { /* * We only release call __free_page(struct se_mem->se_page) when * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, @@ -3862,7 +3794,7 @@ static inline void transport_free_pages(struct se_cmd *cmd) list_del(&se_mem->se_list); kmem_cache_free(se_mem_cache, se_mem); } - cmd->t_task.t_tasks_se_bidi_num = 0; + cmd->t_tasks_se_bidi_num = 0; } static inline void transport_release_tasks(struct se_cmd *cmd) @@ -3874,23 +3806,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) { unsigned long flags; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - if (atomic_read(&cmd->t_task.t_fe_count)) { - if (!(atomic_dec_and_test(&cmd->t_task.t_fe_count))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_fe_count)) { + if (!(atomic_dec_and_test(&cmd->t_fe_count))) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 1; } } - if (atomic_read(&cmd->t_task.t_se_count)) { - if (!(atomic_dec_and_test(&cmd->t_task.t_se_count))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, + if (atomic_read(&cmd->t_se_count)) { + if (!(atomic_dec_and_test(&cmd->t_se_count))) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 1; } } - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } @@ -3902,14 +3834,14 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) if (transport_dec_and_check(cmd)) return; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - if (!(atomic_read(&cmd->t_task.transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!(atomic_read(&cmd->transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto free_pages; } - atomic_set(&cmd->t_task.transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_release_tasks(cmd); free_pages: @@ -3927,22 +3859,22 @@ static int transport_generic_remove( if (transport_dec_and_check(cmd)) { if (session_reinstatement) { - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } return 1; } - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - if (!(atomic_read(&cmd->t_task.transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (!(atomic_read(&cmd->transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto free_pages; } - atomic_set(&cmd->t_task.transport_dev_active, 0); + 
atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_release_tasks(cmd); @@ -3977,7 +3909,6 @@ int transport_generic_map_mem_to_cmd( struct scatterlist *sgl_bidi, u32 sgl_bidi_count) { - u32 mapped_sg_count = 0; int ret; if (!sgl || !sgl_count) @@ -3993,24 +3924,20 @@ int transport_generic_map_mem_to_cmd( * processed into a TCM struct se_subsystem_dev, we do the mapping * from the passed physical memory to struct se_mem->se_page here. */ - ret = transport_map_sg_to_mem(cmd, - &cmd->t_task.t_mem_list, sgl, &mapped_sg_count); + ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl); if (ret < 0) return -ENOMEM; - cmd->t_task.t_tasks_se_num = mapped_sg_count; + cmd->t_tasks_se_num = ret; /* * Setup BIDI READ list of struct se_mem elements */ if (sgl_bidi && sgl_bidi_count) { - mapped_sg_count = 0; - ret = transport_map_sg_to_mem(cmd, - &cmd->t_task.t_mem_bidi_list, sgl_bidi, - &mapped_sg_count); + ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_bidi_list, sgl_bidi); if (ret < 0) return -ENOMEM; - cmd->t_task.t_tasks_se_bidi_num = mapped_sg_count; + cmd->t_tasks_se_bidi_num = ret; } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; @@ -4021,7 +3948,7 @@ int transport_generic_map_mem_to_cmd( return -ENOSYS; } /* - * For incoming CDBs using a contiguous buffer internall with TCM, + * For incoming CDBs using a contiguous buffer internal with TCM, * save the passed struct scatterlist memory. After TCM storage object * processing has completed for this struct se_cmd, TCM core will call * transport_memcpy_[write,read]_contig() as necessary from @@ -4030,8 +3957,8 @@ int transport_generic_map_mem_to_cmd( * struct scatterlist format. */ cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; - cmd->t_task.t_task_pt_sgl = sgl; - /* don't need sgl count? 
We assume it contains cmd->data_length data */ + cmd->t_task_pt_sgl = sgl; + cmd->t_task_pt_sgl_num = sgl_count; } return 0; @@ -4044,54 +3971,51 @@ static inline long long transport_dev_end_lba(struct se_device *dev) return dev->transport->get_blocks(dev) + 1; } -static int transport_get_sectors(struct se_cmd *cmd) +static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - - cmd->t_task.t_tasks_sectors = - (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); - if (!(cmd->t_task.t_tasks_sectors)) - cmd->t_task.t_tasks_sectors = 1; + u32 sectors; if (dev->transport->get_device_type(dev) != TYPE_DISK) return 0; - if ((cmd->t_task.t_task_lba + cmd->t_task.t_tasks_sectors) > + sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); + + if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" " transport_dev_end_lba(): %llu\n", - cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors, + cmd->t_task_lba, sectors, transport_dev_end_lba(dev)); - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY; - return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS; + return 0; } - return 0; + return sectors; } static int transport_new_cmd_obj(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - u32 task_cdbs = 0, rc; + u32 task_cdbs; + u32 rc; if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { - task_cdbs++; - cmd->t_task.t_task_cdbs++; + task_cdbs = 1; + cmd->t_task_list_num = 1; } else { int set_counts = 1; /* * Setup any BIDI READ tasks and memory from - * cmd->t_task.t_mem_bidi_list so the READ struct se_tasks + * cmd->t_mem_bidi_list so the READ struct se_tasks * are queued first for the non pSCSI passthrough case. 
*/ - if (!list_empty(&cmd->t_task.t_mem_bidi_list) && + if (!list_empty(&cmd->t_mem_bidi_list) && (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { - rc = transport_generic_get_cdb_count(cmd, - cmd->t_task.t_task_lba, - cmd->t_task.t_tasks_sectors, - DMA_FROM_DEVICE, &cmd->t_task.t_mem_bidi_list, + rc = transport_allocate_tasks(cmd, + cmd->t_task_lba, + transport_cmd_get_valid_sectors(cmd), + DMA_FROM_DEVICE, &cmd->t_mem_bidi_list, set_counts); if (!(rc)) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; @@ -4102,13 +4026,13 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) set_counts = 0; } /* - * Setup the tasks and memory from cmd->t_task.t_mem_list + * Setup the tasks and memory from cmd->t_mem_list * Note for BIDI transfers this will contain the WRITE payload */ - task_cdbs = transport_generic_get_cdb_count(cmd, - cmd->t_task.t_task_lba, - cmd->t_task.t_tasks_sectors, - cmd->data_direction, &cmd->t_task.t_mem_list, + task_cdbs = transport_allocate_tasks(cmd, + cmd->t_task_lba, + transport_cmd_get_valid_sectors(cmd), + cmd->data_direction, &cmd->t_mem_list, set_counts); if (!(task_cdbs)) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; @@ -4116,26 +4040,25 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return PYX_TRANSPORT_LU_COMM_FAILURE; } - cmd->t_task.t_task_cdbs += task_cdbs; + cmd->t_task_list_num = task_cdbs; #if 0 printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, - cmd->t_task.t_task_lba, cmd->t_task.t_tasks_sectors, - cmd->t_task.t_task_cdbs); + cmd->t_task_lba, cmd->t_tasks_sectors, + cmd->t_task_cdbs); #endif } - atomic_set(&cmd->t_task.t_task_cdbs_left, task_cdbs); - atomic_set(&cmd->t_task.t_task_cdbs_ex_left, task_cdbs); - atomic_set(&cmd->t_task.t_task_cdbs_timeout_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); + atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); return 0; } static int -transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) +transport_generic_get_mem(struct se_cmd *cmd, u32 length) { - unsigned char *buf; struct se_mem *se_mem; /* @@ -4152,24 +4075,16 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) } /* #warning FIXME Allocate contigous pages for struct se_mem elements */ - se_mem->se_page = alloc_pages(GFP_KERNEL, 0); + se_mem->se_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); if (!(se_mem->se_page)) { printk(KERN_ERR "alloc_pages() failed\n"); goto out; } - buf = kmap_atomic(se_mem->se_page, KM_IRQ0); - if (!(buf)) { - printk(KERN_ERR "kmap_atomic() failed\n"); - goto out; - } INIT_LIST_HEAD(&se_mem->se_list); - se_mem->se_len = (length > dma_size) ? 
dma_size : length; - memset(buf, 0, se_mem->se_len); - kunmap_atomic(buf, KM_IRQ0); - - list_add_tail(&se_mem->se_list, &cmd->t_task.t_mem_list); - cmd->t_task.t_tasks_se_num++; + se_mem->se_len = min_t(u32, length, PAGE_SIZE); + list_add_tail(&se_mem->se_list, &cmd->t_mem_list); + cmd->t_tasks_se_num++; DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" " Offset(%u)\n", se_mem->se_page, se_mem->se_len, @@ -4179,7 +4094,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size) } DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", - cmd->t_task.t_tasks_se_num); + cmd->t_tasks_se_num); return 0; out: @@ -4211,7 +4126,7 @@ int transport_init_task_sg( sg_length = se_mem->se_len; if (!(list_is_last(&se_mem->se_list, - &se_cmd->t_task.t_mem_list))) + &se_cmd->t_mem_list))) se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); } else { @@ -4231,7 +4146,7 @@ int transport_init_task_sg( sg_length = (se_mem->se_len - task_offset); if (!(list_is_last(&se_mem->se_list, - &se_cmd->t_task.t_mem_list))) + &se_cmd->t_mem_list))) se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); } @@ -4272,7 +4187,7 @@ next: * Setup task->task_sg_bidi for SCSI READ payload for * TCM/pSCSI passthrough if present for BIDI-COMMAND */ - if (!list_empty(&se_cmd->t_task.t_mem_bidi_list) && + if (!list_empty(&se_cmd->t_mem_bidi_list) && (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { task->task_sg_bidi = kzalloc(task_sg_num_padded * sizeof(struct scatterlist), GFP_KERNEL); @@ -4308,59 +4223,19 @@ next: return task->task_sg_num; } -static inline int transport_set_tasks_sectors_disk( - struct se_task *task, +/* Reduce sectors if they are too long for the device */ +static inline sector_t transport_limit_task_sectors( struct se_device *dev, unsigned long long lba, - u32 sectors, - int *max_sectors_set) + sector_t sectors) { - if ((lba + sectors) > transport_dev_end_lba(dev)) { - task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1); + sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); - if (task->task_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { - task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; - *max_sectors_set = 1; - } - } else { - if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { - task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; - *max_sectors_set = 1; - } else - task->task_sectors = sectors; - } + if (dev->transport->get_device_type(dev) == TYPE_DISK) + if ((lba + sectors) > transport_dev_end_lba(dev)) + sectors = ((transport_dev_end_lba(dev) - lba) + 1); - return 0; -} - -static inline int transport_set_tasks_sectors_non_disk( - struct se_task *task, - struct se_device *dev, - unsigned long long lba, - u32 sectors, - int *max_sectors_set) -{ - if (sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { - task->task_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; - *max_sectors_set = 1; - } else - task->task_sectors = sectors; - - return 0; -} - -static inline int transport_set_tasks_sectors( - struct se_task *task, - struct se_device *dev, - unsigned long long lba, - u32 sectors, - int *max_sectors_set) -{ - return (dev->transport->get_device_type(dev) == TYPE_DISK) ? 
- transport_set_tasks_sectors_disk(task, dev, lba, sectors, - max_sectors_set) : - transport_set_tasks_sectors_non_disk(task, dev, lba, sectors, - max_sectors_set); + return sectors; } /* @@ -4369,11 +4244,11 @@ static inline int transport_set_tasks_sectors( static int transport_map_sg_to_mem( struct se_cmd *cmd, struct list_head *se_mem_list, - struct scatterlist *sg, - u32 *sg_count) + struct scatterlist *sg) { struct se_mem *se_mem; u32 cmd_size = cmd->data_length; + int sg_count = 0; WARN_ON(!sg); @@ -4403,7 +4278,7 @@ static int transport_map_sg_to_mem( se_mem->se_len = cmd_size; cmd_size -= se_mem->se_len; - (*sg_count)++; + sg_count++; DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n", sg_count, cmd_size); @@ -4415,7 +4290,7 @@ static int transport_map_sg_to_mem( DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count); - return 0; + return sg_count; } /* transport_map_mem_to_sg(): @@ -4425,7 +4300,7 @@ static int transport_map_sg_to_mem( int transport_map_mem_to_sg( struct se_task *task, struct list_head *se_mem_list, - void *in_mem, + struct scatterlist *sg, struct se_mem *in_se_mem, struct se_mem **out_se_mem, u32 *se_mem_cnt, @@ -4433,7 +4308,6 @@ int transport_map_mem_to_sg( { struct se_cmd *se_cmd = task->task_se_cmd; struct se_mem *se_mem = in_se_mem; - struct scatterlist *sg = (struct scatterlist *)in_mem; u32 task_size = task->task_size, sg_no = 0; if (!sg) { @@ -4444,7 +4318,7 @@ int transport_map_mem_to_sg( while (task_size != 0) { /* - * Setup the contigious array of scatterlists for + * Setup the contiguous array of scatterlists for * this struct se_task. */ sg_assign_page(sg, se_mem->se_page); @@ -4456,7 +4330,7 @@ int transport_map_mem_to_sg( sg->length = se_mem->se_len; if (!(list_is_last(&se_mem->se_list, - &se_cmd->t_task.t_mem_list))) { + &se_cmd->t_mem_list))) { se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); (*se_mem_cnt)++; @@ -4492,7 +4366,7 @@ int transport_map_mem_to_sg( sg->length = (se_mem->se_len - *task_offset); if (!(list_is_last(&se_mem->se_list, - &se_cmd->t_task.t_mem_list))) { + &se_cmd->t_mem_list))) { se_mem = list_entry(se_mem->se_list.next, struct se_mem, se_list); (*se_mem_cnt)++; @@ -4548,9 +4422,9 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) } /* * Walk the struct se_task list and setup scatterlist chains - * for each contiguosly allocated struct se_task->task_sg[]. + * for each contiguously allocated struct se_task->task_sg[]. */ - list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task_list, t_list) { if (!(task->task_sg) || !(task->task_padded_sg)) continue; @@ -4561,7 +4435,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) * Either add chain or mark end of scatterlist */ if (!(list_is_last(&task->t_list, - &cmd->t_task.t_task_list))) { + &cmd->t_task_list))) { /* * Clear existing SGL termination bit set in * transport_init_task_sg(), see sg_mark_end() @@ -4587,7 +4461,7 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) /* * Check for single task.. */ - if (!(list_is_last(&task->t_list, &cmd->t_task.t_task_list))) { + if (!(list_is_last(&task->t_list, &cmd->t_task_list))) { /* * Clear existing SGL termination bit set in * transport_init_task_sg(), see sg_mark_end() @@ -4605,15 +4479,15 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) * Setup the starting pointer and total t_tasks_sg_linked_no including * padding SGs for linking and to mark the end. 
*/ - cmd->t_task.t_tasks_sg_chained = sg_first; - cmd->t_task.t_tasks_sg_chained_no = sg_count; + cmd->t_tasks_sg_chained = sg_first; + cmd->t_tasks_sg_chained_no = sg_count; - DEBUG_CMD_M("Setup cmd: %p cmd->t_task.t_tasks_sg_chained: %p and" - " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_task.t_tasks_sg_chained, - cmd->t_task.t_tasks_sg_chained_no); + DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" + " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, + cmd->t_tasks_sg_chained_no); - for_each_sg(cmd->t_task.t_tasks_sg_chained, sg, - cmd->t_task.t_tasks_sg_chained_no, i) { + for_each_sg(cmd->t_tasks_sg_chained, sg, + cmd->t_tasks_sg_chained_no, i) { DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n", i, sg, sg_page(sg), sg->length, sg->offset); @@ -4646,7 +4520,7 @@ static int transport_do_se_mem_map( in_mem, in_se_mem, out_se_mem, se_mem_cnt, task_offset_in); if (ret == 0) - task->task_se_cmd->t_task.t_tasks_se_num += *se_mem_cnt; + task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; return ret; } @@ -4684,7 +4558,10 @@ static int transport_do_se_mem_map( task_offset_in); } -static u32 transport_generic_get_cdb_count( +/* + * Break up cmd into chunks transport can handle + */ +static u32 transport_allocate_tasks( struct se_cmd *cmd, unsigned long long lba, u32 sectors, @@ -4694,17 +4571,18 @@ static u32 transport_generic_get_cdb_count( { unsigned char *cdb = NULL; struct se_task *task; - struct se_mem *se_mem = NULL, *se_mem_lout = NULL; - struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL; + struct se_mem *se_mem = NULL; + struct se_mem *se_mem_lout = NULL; + struct se_mem *se_mem_bidi = NULL; + struct se_mem *se_mem_bidi_lout = NULL; struct se_device *dev = cmd->se_dev; - int max_sectors_set = 0, ret; - u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0; + int ret; + u32 task_offset_in = 0; + u32 se_mem_cnt = 0; + u32 se_mem_bidi_cnt = 0; + u32 task_cdbs = 0; - if (!mem_list) { - printk(KERN_ERR "mem_list is NULL in transport_generic_get" - "_cdb_count()\n"); - return 0; - } + BUG_ON(!mem_list); /* * While using RAMDISK_DR backstores is the only case where * mem_list will ever be empty at this point. 
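The rename of transport_generic_get_cdb_count() to transport_allocate_tasks() in the hunk above also moves the per-chunk clamping into the new transport_limit_task_sectors() helper. A rough userspace sketch of that loop may help when reading the next hunk; all names here are hypothetical stand-ins, and the sketch simplifies the kernel logic (no se_mem mapping, fixed device geometry):

	#include <stdio.h>

	typedef unsigned long long sector_t;

	/* Simplified stand-in for transport_limit_task_sectors(): clamp a
	 * chunk to the per-device max_sectors and to the end of the device.
	 */
	static sector_t limit_task_sectors(sector_t lba, sector_t sectors,
					   sector_t max_sectors, sector_t end_lba)
	{
		if (sectors > max_sectors)
			sectors = max_sectors;
		if (lba + sectors > end_lba)
			sectors = (lba > end_lba) ? 0 : end_lba - lba + 1;
		return sectors;
	}

	int main(void)
	{
		sector_t lba = 0, sectors = 10000;
		const sector_t max_sectors = 1024, end_lba = 8191;
		int tasks = 0;

		/* Mirrors the while (sectors) loop below: one task per
		 * clamped chunk, advancing lba until nothing more fits.
		 */
		while (sectors) {
			sector_t chunk = limit_task_sectors(lba, sectors,
							    max_sectors, end_lba);
			if (!chunk)
				break;
			lba += chunk;
			sectors -= chunk;
			tasks++;
		}
		printf("split into %d tasks\n", tasks); /* prints 8 */
		return 0;
	}

The design point the refactor makes: instead of the old max_sectors_set flag threaded through three helpers, the clamp is a pure function and the loop simply stops when it returns zero.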
@@ -4715,40 +4593,47 @@ static u32 transport_generic_get_cdb_count( * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation */ - if (!list_empty(&cmd->t_task.t_mem_bidi_list) && + if (!list_empty(&cmd->t_mem_bidi_list) && (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) - se_mem_bidi = list_first_entry(&cmd->t_task.t_mem_bidi_list, + se_mem_bidi = list_first_entry(&cmd->t_mem_bidi_list, struct se_mem, se_list); while (sectors) { + sector_t limited_sectors; + DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", cmd->se_tfo->get_task_tag(cmd), lba, sectors, transport_dev_end_lba(dev)); + limited_sectors = transport_limit_task_sectors(dev, lba, sectors); + if (!limited_sectors) + break; + task = transport_generic_get_task(cmd, data_direction); - if (!(task)) + if (!task) goto out; - transport_set_tasks_sectors(task, dev, lba, sectors, - &max_sectors_set); - task->task_lba = lba; + task->task_sectors = limited_sectors; lba += task->task_sectors; sectors -= task->task_sectors; task->task_size = (task->task_sectors * dev->se_sub_dev->se_dev_attrib.block_size); cdb = dev->transport->get_cdb(task); - if ((cdb)) { - memcpy(cdb, cmd->t_task.t_task_cdb, - scsi_command_size(cmd->t_task.t_task_cdb)); - cmd->transport_split_cdb(task->task_lba, - &task->task_sectors, cdb); - } + /* Should be part of task, can't fail */ + BUG_ON(!cdb); + + memcpy(cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); + + /* Update new cdb with updated lba/sectors */ + cmd->transport_split_cdb(task->task_lba, + &task->task_sectors, cdb); /* * Perform the SE OBJ plugin and/or Transport plugin specific - * mapping for cmd->t_task.t_mem_list. And setup the + * mapping for cmd->t_mem_list. And setup the * task->task_sg and if necessary task->task_sg_bidi */ ret = transport_do_se_mem_map(dev, task, mem_list, @@ -4759,7 +4644,7 @@ static u32 transport_generic_get_cdb_count( se_mem = se_mem_lout; /* - * Setup the cmd->t_task.t_mem_bidi_list -> task->task_sg_bidi + * Setup the cmd->t_mem_bidi_list -> task->task_sg_bidi * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI * * Note that the first call to transport_do_se_mem_map() above will @@ -4769,7 +4654,7 @@ static u32 transport_generic_get_cdb_count( */ if (task->task_sg_bidi != NULL) { ret = transport_do_se_mem_map(dev, task, - &cmd->t_task.t_mem_bidi_list, NULL, + &cmd->t_mem_bidi_list, NULL, se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, &task_offset_in); if (ret < 0) @@ -4781,19 +4666,11 @@ static u32 transport_generic_get_cdb_count( DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", task_cdbs, task->task_sg_num); - - if (max_sectors_set) { - max_sectors_set = 0; - continue; - } - - if (!sectors) - break; } if (set_counts) { - atomic_inc(&cmd->t_task.t_fe_count); - atomic_inc(&cmd->t_task.t_se_count); + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); } DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", @@ -4818,27 +4695,27 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; cdb = dev->transport->get_cdb(task); - if (cdb) - memcpy(cdb, cmd->t_task.t_task_cdb, - scsi_command_size(cmd->t_task.t_task_cdb)); + BUG_ON(!cdb); + memcpy(cdb, cmd->t_task_cdb, + scsi_command_size(cmd->t_task_cdb)); task->task_size = cmd->data_length; task->task_sg_num = (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 
1 : 0; - atomic_inc(&cmd->t_task.t_fe_count); - atomic_inc(&cmd->t_task.t_se_count); + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { struct se_mem *se_mem = NULL, *se_mem_lout = NULL; u32 se_mem_cnt = 0, task_offset = 0; - if (!list_empty(&cmd->t_task.t_mem_list)) - se_mem = list_first_entry(&cmd->t_task.t_mem_list, + if (!list_empty(&cmd->t_mem_list)) + se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list); ret = transport_do_se_mem_map(dev, task, - &cmd->t_task.t_mem_list, NULL, se_mem, + &cmd->t_mem_list, NULL, se_mem, &se_mem_lout, &se_mem_cnt, &task_offset); if (ret < 0) return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; @@ -4869,9 +4746,8 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) /* * Generate struct se_task(s) and/or their payloads for this CDB. */ -static int transport_generic_new_cmd(struct se_cmd *cmd) +int transport_generic_new_cmd(struct se_cmd *cmd) { - struct se_portal_group *se_tpg; struct se_task *task; struct se_device *dev = cmd->se_dev; int ret = 0; @@ -4880,7 +4756,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) * Determine is the TCM fabric module has already allocated physical * memory, and is directly calling transport_generic_map_mem_to_cmd() * to setup beforehand the linked list of physical memory at - * cmd->t_task.t_mem_list of struct se_mem->se_page + * cmd->t_mem_list of struct se_mem->se_page */ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { ret = transport_allocate_resources(cmd); @@ -4888,28 +4764,12 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) return ret; } - ret = transport_get_sectors(cmd); - if (ret < 0) - return ret; - ret = transport_new_cmd_obj(cmd); if (ret < 0) return ret; - /* - * Determine if the calling TCM fabric module is talking to - * Linux/NET via kernel sockets and needs to allocate a - * struct iovec array to complete the struct se_cmd - */ - se_tpg = cmd->se_lun->lun_sep->sep_tpg; - if (se_tpg->se_tpg_tfo->alloc_cmd_iovecs != NULL) { - ret = se_tpg->se_tpg_tfo->alloc_cmd_iovecs(cmd); - if (ret < 0) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; - } - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - list_for_each_entry(task, &cmd->t_task.t_task_list, t_list) { + list_for_each_entry(task, &cmd->t_task_list, t_list) { if (atomic_read(&task->task_sent)) continue; if (!dev->transport->map_task_SG) @@ -4926,7 +4786,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) } /* - * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready.. + * For WRITEs, let the fabric know its buffer is ready.. * This WRITE struct se_cmd (and all of its associated struct se_task's) * will be added to the struct se_device execution queue after its WRITE * data has arrived. 
(ie: It gets handled by the transport processing @@ -4943,6 +4803,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd) transport_execute_tasks(cmd); return 0; } +EXPORT_SYMBOL(transport_generic_new_cmd); /* transport_generic_process_write(): * @@ -4956,9 +4817,9 @@ void transport_generic_process_write(struct se_cmd *cmd) * original EDTL */ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { - if (!cmd->t_task.t_tasks_se_num) { + if (!cmd->t_tasks_se_num) { unsigned char *dst, *buf = - (unsigned char *)cmd->t_task.t_task_buf; + (unsigned char *)cmd->t_task_buf; dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL); if (!(dst)) { @@ -4970,15 +4831,15 @@ void transport_generic_process_write(struct se_cmd *cmd) } memcpy(dst, buf, cmd->cmd_spdtl); - kfree(cmd->t_task.t_task_buf); - cmd->t_task.t_task_buf = dst; + kfree(cmd->t_task_buf); + cmd->t_task_buf = dst; } else { struct scatterlist *sg = - (struct scatterlist *sg)cmd->t_task.t_task_buf; + (struct scatterlist *sg)cmd->t_task_buf; struct scatterlist *orig_sg; orig_sg = kzalloc(sizeof(struct scatterlist) * - cmd->t_task.t_tasks_se_num, + cmd->t_tasks_se_num, GFP_KERNEL))) { if (!(orig_sg)) { printk(KERN_ERR "Unable to allocate memory" @@ -4988,9 +4849,9 @@ void transport_generic_process_write(struct se_cmd *cmd) return; } - memcpy(orig_sg, cmd->t_task.t_task_buf, + memcpy(orig_sg, cmd->t_task_buf, sizeof(struct scatterlist) * - cmd->t_task.t_tasks_se_num); + cmd->t_tasks_se_num); cmd->data_length = cmd->cmd_spdtl; /* @@ -5021,22 +4882,23 @@ static int transport_generic_write_pending(struct se_cmd *cmd) unsigned long flags; int ret; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = TRANSPORT_WRITE_PENDING; - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); /* * For the TCM control CDBs using a contiguous buffer, do the memcpy * from the passed Linux/SCSI struct scatterlist located at - * se_cmd->t_task.t_task_pt_buf to the contiguous buffer at - * se_cmd->t_task.t_task_buf. + * se_cmd->t_task_pt_sgl to the contiguous buffer at + * se_cmd->t_task_buf. */ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) - transport_memcpy_read_contig(cmd, - cmd->t_task.t_task_buf, - cmd->t_task.t_task_pt_sgl); + sg_copy_to_buffer(cmd->t_task_pt_sgl, + cmd->t_task_pt_sgl_num, + cmd->t_task_buf, + cmd->data_length); /* * Clear the se_cmd for WRITE_PENDING status in order to set - * cmd->t_task.t_transport_active=0 so that transport_generic_handle_data + * cmd->t_transport_active=0 so that transport_generic_handle_data * can be called from HW target mode interrupt code. This is safe * to be called with transport_off=1 before the cmd->se_tfo->write_pending * because the se_cmd->se_lun pointer is not being cleared. @@ -5123,28 +4985,28 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) * If the frontend has already requested this struct se_cmd to * be stopped, we can safely ignore this struct se_cmd. 
*/ - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - if (atomic_read(&cmd->t_task.t_transport_stop)) { - atomic_set(&cmd->t_task.transport_lun_stop, 0); + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (atomic_read(&cmd->t_transport_stop)) { + atomic_set(&cmd->transport_lun_stop, 0); DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_cmd_check_stop(cmd, 1, 0); return -EPERM; } - atomic_set(&cmd->t_task.transport_lun_fe_stop, 1); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + atomic_set(&cmd->transport_lun_fe_stop, 1); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); ret = transport_stop_tasks_for_cmd(cmd); DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" - " %d\n", cmd, cmd->t_task.t_task_cdbs, ret); + " %d\n", cmd, cmd->t_task_cdbs, ret); if (!ret) { DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", cmd->se_tfo->get_task_tag(cmd)); - wait_for_completion(&cmd->t_task.transport_lun_stop_comp); + wait_for_completion(&cmd->transport_lun_stop_comp); DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", cmd->se_tfo->get_task_tag(cmd)); } @@ -5174,19 +5036,19 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) struct se_cmd, se_lun_node); list_del(&cmd->se_lun_node); - atomic_set(&cmd->t_task.transport_lun_active, 0); + atomic_set(&cmd->transport_lun_active, 0); /* * This will notify iscsi_target_transport.c: * transport_cmd_check_stop() that a LUN shutdown is in * progress for the iscsi_cmd_t. */ - spin_lock(&cmd->t_task.t_state_lock); - DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->t_task.transport" + spin_lock(&cmd->t_state_lock); + DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport" "_lun_stop for ITT: 0x%08x\n", cmd->se_lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - atomic_set(&cmd->t_task.transport_lun_stop, 1); - spin_unlock(&cmd->t_task.t_state_lock); + atomic_set(&cmd->transport_lun_stop, 1); + spin_unlock(&cmd->t_state_lock); spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); @@ -5214,14 +5076,14 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) cmd->se_lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags); - if (!(atomic_read(&cmd->t_task.transport_dev_active))) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); + spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); + if (!(atomic_read(&cmd->transport_dev_active))) { + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); goto check_cond; } - atomic_set(&cmd->t_task.transport_dev_active, 0); + atomic_set(&cmd->transport_dev_active, 0); transport_all_task_dev_remove_state(cmd); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); transport_free_dev_tasks(cmd); /* @@ -5238,24 +5100,24 @@ check_cond: * be released, notify the waiting thread now that LU has * finished accessing it. 
*/ - spin_lock_irqsave(&cmd->t_task.t_state_lock, cmd_flags); - if (atomic_read(&cmd->t_task.transport_lun_fe_stop)) { + spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); + if (atomic_read(&cmd->transport_lun_fe_stop)) { DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" " struct se_cmd: %p ITT: 0x%08x\n", lun->unpacked_lun, cmd, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); transport_cmd_check_stop(cmd, 1, 0); - complete(&cmd->t_task.transport_lun_fe_stop_comp); + complete(&cmd->transport_lun_fe_stop_comp); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, cmd_flags); + spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); } spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); @@ -5301,15 +5163,15 @@ static void transport_generic_wait_for_tasks( if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) return; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); /* * If we are already stopped due to an external event (ie: LUN shutdown) * sleep until the connection can have the passed struct se_cmd back. - * The cmd->t_task.transport_lun_stopped_sem will be upped by + * The cmd->transport_lun_stopped_sem will be upped by * transport_clear_lun_from_sessions() once the ConfigFS context caller * has completed its operation on the struct se_cmd. */ - if (atomic_read(&cmd->t_task.transport_lun_stop)) { + if (atomic_read(&cmd->transport_lun_stop)) { DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" " wait_for_completion(&cmd->t_tasktransport_lun_fe" @@ -5322,10 +5184,10 @@ static void transport_generic_wait_for_tasks( * We go ahead and up transport_lun_stop_comp just to be sure * here. 
*/ - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); - complete(&cmd->t_task.transport_lun_stop_comp); - wait_for_completion(&cmd->t_task.transport_lun_fe_stop_comp); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + complete(&cmd->transport_lun_stop_comp); + wait_for_completion(&cmd->transport_lun_fe_stop_comp); + spin_lock_irqsave(&cmd->t_state_lock, flags); transport_all_task_dev_remove_state(cmd); /* @@ -5338,13 +5200,13 @@ static void transport_generic_wait_for_tasks( "stop_comp); for ITT: 0x%08x\n", cmd->se_tfo->get_task_tag(cmd)); - atomic_set(&cmd->t_task.transport_lun_stop, 0); + atomic_set(&cmd->transport_lun_stop, 0); } - if (!atomic_read(&cmd->t_task.t_transport_active) || - atomic_read(&cmd->t_task.t_transport_aborted)) + if (!atomic_read(&cmd->t_transport_active) || + atomic_read(&cmd->t_transport_aborted)) goto remove; - atomic_set(&cmd->t_task.t_transport_stop, 1); + atomic_set(&cmd->t_transport_stop, 1); DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" @@ -5352,21 +5214,21 @@ static void transport_generic_wait_for_tasks( cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state); - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq); - wait_for_completion(&cmd->t_task.t_transport_stop_comp); + wait_for_completion(&cmd->t_transport_stop_comp); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - atomic_set(&cmd->t_task.t_transport_active, 0); - atomic_set(&cmd->t_task.t_transport_stop, 0); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_set(&cmd->t_transport_active, 0); + atomic_set(&cmd->t_transport_stop, 0); DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" - "&cmd->t_task.t_transport_stop_comp) for ITT: 0x%08x\n", + "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", cmd->se_tfo->get_task_tag(cmd)); remove: - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (!remove_cmd) return; @@ -5405,13 +5267,13 @@ int transport_send_check_condition_and_sense( int offset; u8 asc = 0, ascq = 0; - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; - spin_unlock_irqrestore(&cmd->t_task.t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (!reason && from_transport) goto after_reason; @@ -5570,14 +5432,14 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) { int ret = 0; - if (atomic_read(&cmd->t_task.t_transport_aborted) != 0) { + if (atomic_read(&cmd->t_transport_aborted) != 0) { if (!(send_status) || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) return 1; #if 0 printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" " status for CDB: 0x%02x ITT: 0x%08x\n", - cmd->t_task.t_task_cdb[0], + cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); #endif cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS; @@ -5598,7 +5460,7 @@ void transport_send_task_abort(struct se_cmd *cmd) */ if (cmd->data_direction == DMA_TO_DEVICE) { if (cmd->se_tfo->write_pending_status(cmd) != 0) { - atomic_inc(&cmd->t_task.t_transport_aborted); + 
atomic_inc(&cmd->t_transport_aborted); smp_mb__after_atomic_inc(); cmd->scsi_status = SAM_STAT_TASK_ABORTED; transport_new_cmd_failure(cmd); @@ -5608,7 +5470,7 @@ void transport_send_task_abort(struct se_cmd *cmd) cmd->scsi_status = SAM_STAT_TASK_ABORTED; #if 0 printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," - " ITT: 0x%08x\n", cmd->t_task.t_task_cdb[0], + " ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); #endif cmd->se_tfo->queue_status(cmd); @@ -5620,14 +5482,12 @@ void transport_send_task_abort(struct se_cmd *cmd) */ int transport_generic_do_tmr(struct se_cmd *cmd) { - struct se_cmd *ref_cmd; struct se_device *dev = cmd->se_dev; struct se_tmr_req *tmr = cmd->se_tmr_req; int ret; switch (tmr->function) { case TMR_ABORT_TASK: - ref_cmd = tmr->ref_cmd; tmr->response = TMR_FUNCTION_REJECTED; break; case TMR_ABORT_TASK_SET: @@ -5699,7 +5559,7 @@ static void transport_processing_shutdown(struct se_device *dev) spin_unlock_irqrestore(&dev->execute_task_lock, flags); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, flags); DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," " i_state/def_i_state: %d/%d, t_state/def_t_state:" @@ -5707,22 +5567,22 @@ static void transport_processing_shutdown(struct se_device *dev) cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, cmd->t_state, cmd->deferred_t_state, - cmd->t_task.t_task_cdb[0]); + cmd->t_task_cdb[0]); DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" " %d t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", cmd->se_tfo->get_task_tag(cmd), - cmd->t_task.t_task_cdbs, - atomic_read(&cmd->t_task.t_task_cdbs_left), - atomic_read(&cmd->t_task.t_task_cdbs_sent), - atomic_read(&cmd->t_task.t_transport_active), - atomic_read(&cmd->t_task.t_transport_stop), - atomic_read(&cmd->t_task.t_transport_sent)); + cmd->t_task_cdbs, + atomic_read(&cmd->t_task_cdbs_left), + atomic_read(&cmd->t_task_cdbs_sent), + atomic_read(&cmd->t_transport_active), + atomic_read(&cmd->t_transport_stop), + atomic_read(&cmd->t_transport_sent)); if (atomic_read(&task->task_active)) { atomic_set(&task->task_stop, 1); spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); DEBUG_DO("Waiting for task: %p to shutdown for dev:" " %p\n", task, dev); @@ -5730,8 +5590,8 @@ static void transport_processing_shutdown(struct se_device *dev) DEBUG_DO("Completed task: %p shutdown for dev: %p\n", task, dev); - spin_lock_irqsave(&cmd->t_task.t_state_lock, flags); - atomic_dec(&cmd->t_task.t_task_cdbs_left); + spin_lock_irqsave(&cmd->t_state_lock, flags); + atomic_dec(&cmd->t_task_cdbs_left); atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); @@ -5741,25 +5601,25 @@ static void transport_processing_shutdown(struct se_device *dev) } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&cmd->t_task.t_task_cdbs_ex_left))) { + if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); DEBUG_DO("Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, - atomic_read(&cmd->t_task.t_task_cdbs_ex_left)); + atomic_read(&cmd->t_task_cdbs_ex_left)); spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - if (atomic_read(&cmd->t_task.t_transport_active)) { + if (atomic_read(&cmd->t_transport_active)) { DEBUG_DO("got 
t_transport_active = 1 for task: %p, dev:" " %p\n", task, dev); - if (atomic_read(&cmd->t_task.t_fe_count)) { + if (atomic_read(&cmd->t_fe_count)) { spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); transport_send_check_condition_and_sense( cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); @@ -5770,7 +5630,7 @@ static void transport_processing_shutdown(struct se_device *dev) transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); @@ -5787,9 +5647,9 @@ static void transport_processing_shutdown(struct se_device *dev) DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", task, dev); - if (atomic_read(&cmd->t_task.t_fe_count)) { + if (atomic_read(&cmd->t_fe_count)) { spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); transport_remove_cmd_from_queue(cmd, @@ -5799,7 +5659,7 @@ static void transport_processing_shutdown(struct se_device *dev) transport_cmd_check_stop(cmd, 1, 0); } else { spin_unlock_irqrestore( - &cmd->t_task.t_state_lock, flags); + &cmd->t_state_lock, flags); transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); @@ -5820,7 +5680,7 @@ static void transport_processing_shutdown(struct se_device *dev) DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", cmd, cmd->t_state); - if (atomic_read(&cmd->t_task.t_fe_count)) { + if (atomic_read(&cmd->t_fe_count)) { transport_send_check_condition_and_sense(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 3b8b02cf4b41..d28e9c4a1c99 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -270,7 +270,7 @@ void core_scsi3_ua_for_check_condition( nacl->se_tpg->se_tpg_tfo->get_fabric_name(), (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? 
"Reporting" : "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl, - cmd->orig_fe_lun, cmd->t_task.t_task_cdb[0], *asc, *ascq); + cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); } int core_scsi3_ua_clear_for_request_sense( diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 6d9553bbba30..910306ce48de 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -60,7 +60,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) struct fc_seq *sp; struct se_cmd *se_cmd; struct se_mem *mem; - struct se_transport_task *task; if (!(ft_debug_logging & FT_DEBUG_IO)) return; @@ -72,12 +71,11 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) caller, cmd, cmd->cdb); printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); - task = &se_cmd->t_task; - printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", - caller, cmd, task, task->t_tasks_se_num, - task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); + printk(KERN_INFO "%s: cmd %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", + caller, cmd, se_cmd->t_tasks_se_num, + se_cmd->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); - list_for_each_entry(mem, &task->t_mem_list, se_list) + list_for_each_entry(mem, &se_cmd->t_mem_list, se_list) printk(KERN_INFO "%s: cmd %p mem %p page %p " "len 0x%x off 0x%x\n", caller, cmd, mem, @@ -262,9 +260,9 @@ int ft_write_pending(struct se_cmd *se_cmd) * TCM/LIO target */ transport_do_task_sg_chain(se_cmd); - cmd->sg = se_cmd->t_task.t_tasks_sg_chained; + cmd->sg = se_cmd->t_tasks_sg_chained; cmd->sg_cnt = - se_cmd->t_task.t_tasks_sg_chained_no; + se_cmd->t_tasks_sg_chained_no; } if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid, cmd->sg, cmd->sg_cnt)) diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index f18af6e99b83..8560182f0dad 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -65,7 +65,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd) { struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); - struct se_transport_task *task; struct fc_frame *fp = NULL; struct fc_exch *ep; struct fc_lport *lport; @@ -90,14 +89,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd) lport = ep->lp; cmd->seq = lport->tt.seq_start_next(cmd->seq); - task = &se_cmd->t_task; remaining = se_cmd->data_length; /* * Setup to use first mem list entry if any. */ - if (task->t_tasks_se_num) { - mem = list_first_entry(&task->t_mem_list, + if (se_cmd->t_tasks_se_num) { + mem = list_first_entry(&se_cmd->t_mem_list, struct se_mem, se_list); mem_len = mem->se_len; mem_off = mem->se_off; @@ -148,8 +146,8 @@ int ft_queue_data_in(struct se_cmd *se_cmd) if (use_sg) { if (!mem) { - BUG_ON(!task->t_task_buf); - page_addr = task->t_task_buf + mem_off; + BUG_ON(!se_cmd->t_task_buf); + page_addr = se_cmd->t_task_buf + mem_off; /* * In this case, offset is 'offset_in_page' of * (t_task_buf + mem_off) instead of 'mem_off'. 
@@ -180,7 +178,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) kunmap_atomic(page_addr, KM_SOFTIRQ0); to += tlen; } else { - from = task->t_task_buf + mem_off; + from = se_cmd->t_task_buf + mem_off; memcpy(to, from, tlen); to += tlen; } @@ -220,7 +218,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) struct fc_seq *seq = cmd->seq; struct fc_exch *ep; struct fc_lport *lport; - struct se_transport_task *task; struct fc_frame_header *fh; struct se_mem *mem; u32 mem_off; @@ -235,8 +232,6 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) u32 f_ctl; void *buf; - task = &se_cmd->t_task; - fh = fc_frame_header_get(fp); if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF)) goto drop; @@ -312,8 +307,8 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) /* * Setup to use first mem list entry if any. */ - if (task->t_tasks_se_num) { - mem = list_first_entry(&task->t_mem_list, + if (se_cmd->t_tasks_se_num) { + mem = list_first_entry(&se_cmd->t_mem_list, struct se_mem, se_list); mem_len = mem->se_len; mem_off = mem->se_off; @@ -355,7 +350,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) memcpy(to, from, tlen); kunmap_atomic(page_addr, KM_SOFTIRQ0); } else { - to = task->t_task_buf + mem_off; + to = se_cmd->t_task_buf + mem_off; memcpy(to, from, tlen); } from += tlen; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 94c838dcfc3c..71c96ce9287e 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -403,64 +403,10 @@ struct se_queue_obj { wait_queue_head_t thread_wq; } ____cacheline_aligned; -/* - * Used one per struct se_cmd to hold all extra struct se_task - * metadata. This structure is setup and allocated in - * drivers/target/target_core_transport.c:__transport_alloc_se_cmd() - */ -struct se_transport_task { - unsigned char *t_task_cdb; - unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; - unsigned long long t_task_lba; - int t_tasks_failed; - int t_tasks_fua; - bool t_tasks_bidi; - u32 t_task_cdbs; - u32 t_tasks_check; - u32 t_tasks_no; - u32 t_tasks_sectors; - u32 t_tasks_se_num; - u32 t_tasks_se_bidi_num; - u32 t_tasks_sg_chained_no; - atomic_t t_fe_count; - atomic_t t_se_count; - atomic_t t_task_cdbs_left; - atomic_t t_task_cdbs_ex_left; - atomic_t t_task_cdbs_timeout_left; - atomic_t t_task_cdbs_sent; - atomic_t t_transport_aborted; - atomic_t t_transport_active; - atomic_t t_transport_complete; - atomic_t t_transport_queue_active; - atomic_t t_transport_sent; - atomic_t t_transport_stop; - atomic_t t_transport_timeout; - atomic_t transport_dev_active; - atomic_t transport_lun_active; - atomic_t transport_lun_fe_stop; - atomic_t transport_lun_stop; - spinlock_t t_state_lock; - struct completion t_transport_stop_comp; - struct completion transport_lun_fe_stop_comp; - struct completion transport_lun_stop_comp; - struct scatterlist *t_tasks_sg_chained; - struct scatterlist t_tasks_sg_bounce; - void *t_task_buf; - /* - * Used for pre-registered fabric SGL passthrough WRITE and READ - * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop - * and other HW target mode fabric modules. 
- */ - struct scatterlist *t_task_pt_sgl; - struct list_head t_mem_list; - /* Used for BIDI READ */ - struct list_head t_mem_bidi_list; - struct list_head t_task_list; -} ____cacheline_aligned; - struct se_task { unsigned char task_sense; struct scatterlist *task_sg; + u32 task_sg_num; struct scatterlist *task_sg_bidi; u8 task_scsi_status; u8 task_flags; @@ -471,8 +417,6 @@ struct se_task { u32 task_no; u32 task_sectors; u32 task_size; - u32 task_sg_num; - u32 task_sg_offset; enum dma_data_direction task_data_direction; struct se_cmd *task_se_cmd; struct se_device *se_dev; @@ -534,13 +478,58 @@ struct se_cmd { /* Only used for internal passthrough and legacy TCM fabric modules */ struct se_session *se_sess; struct se_tmr_req *se_tmr_req; - struct se_transport_task t_task; struct list_head se_queue_node; struct target_core_fabric_ops *se_tfo; int (*transport_emulate_cdb)(struct se_cmd *); void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); void (*transport_wait_for_tasks)(struct se_cmd *, int, int); void (*transport_complete_callback)(struct se_cmd *); + unsigned char *t_task_cdb; + unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; + unsigned long long t_task_lba; + int t_tasks_failed; + int t_tasks_fua; + bool t_tasks_bidi; + u32 t_tasks_se_num; + u32 t_tasks_se_bidi_num; + u32 t_tasks_sg_chained_no; + atomic_t t_fe_count; + atomic_t t_se_count; + atomic_t t_task_cdbs_left; + atomic_t t_task_cdbs_ex_left; + atomic_t t_task_cdbs_timeout_left; + atomic_t t_task_cdbs_sent; + atomic_t t_transport_aborted; + atomic_t t_transport_active; + atomic_t t_transport_complete; + atomic_t t_transport_queue_active; + atomic_t t_transport_sent; + atomic_t t_transport_stop; + atomic_t t_transport_timeout; + atomic_t transport_dev_active; + atomic_t transport_lun_active; + atomic_t transport_lun_fe_stop; + atomic_t transport_lun_stop; + spinlock_t t_state_lock; + struct completion t_transport_stop_comp; + struct completion transport_lun_fe_stop_comp; + struct completion transport_lun_stop_comp; + struct scatterlist *t_tasks_sg_chained; + struct scatterlist t_tasks_sg_bounce; + void *t_task_buf; + /* + * Used for pre-registered fabric SGL passthrough WRITE and READ + * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop + * and other HW target mode fabric modules. + */ + struct scatterlist *t_task_pt_sgl; + u32 t_task_pt_sgl_num; + struct list_head t_mem_list; + /* Used for BIDI READ */ + struct list_head t_mem_bidi_list; + struct list_head t_task_list; + u32 t_task_list_num; + } ____cacheline_aligned; struct se_tmr_req { diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h index 747e1404dca0..1752ed3f77fa 100644 --- a/include/target/target_core_fabric_ops.h +++ b/include/target/target_core_fabric_ops.h @@ -38,11 +38,6 @@ struct target_core_fabric_ops { * initially allocated in interrupt context. 
*/ int (*new_cmd_map)(struct se_cmd *); - /* - * Optional function pointer for TCM fabric modules that use - * Linux/NET sockets to allocate struct iovec array to struct se_cmd - */ - int (*alloc_cmd_iovecs)(struct se_cmd *); /* * Optional to release struct se_cmd and fabric dependent allocated * I/O descriptor in transport_cmd_check_stop() diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index acd591491767..c9846d521945 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -184,10 +184,11 @@ extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); extern void transport_generic_wait_for_cmds(struct se_cmd *, int); extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, - void *, struct se_mem *, + struct scatterlist *, struct se_mem *, struct se_mem **, u32 *, u32 *); extern void transport_do_task_sg_chain(struct se_cmd *); extern void transport_generic_process_write(struct se_cmd *); +extern int transport_generic_new_cmd(struct se_cmd *); extern int transport_generic_do_tmr(struct se_cmd *); /* From target_core_alua.c */ extern int core_alua_check_nonop_delay(struct se_cmd *); -- cgit v1.2.3 From dc2e652d5f36d7b1c8764c3c3174e28ec2d9903b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 31 May 2011 17:06:42 -0400 Subject: target: remove the always-noop ->new_cmd_failure method Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/loopback/tcm_loop.c | 11 ----------- drivers/target/target_core_configfs.c | 4 ---- drivers/target/target_core_tmr.c | 5 ----- drivers/target/target_core_transport.c | 2 -- drivers/target/tcm_fc/tcm_fc.h | 1 - drivers/target/tcm_fc/tfc_cmd.c | 6 ------ drivers/target/tcm_fc/tfc_conf.c | 1 - include/target/target_core_fabric_ops.h | 1 - 8 files changed, 31 deletions(-) (limited to 'include') diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 7ba2542aabe7..cb4a9b906a4b 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -772,16 +772,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg) return 1; } -static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd) -{ - /* - * Since TCM_loop is already passing struct scatterlist data from - * struct scsi_cmnd, no more Linux/SCSI failure dependent state need - * to be handled here. 
- */ - return; -} - static int tcm_loop_is_state_remove(struct se_cmd *se_cmd) { /* @@ -1446,7 +1436,6 @@ static int tcm_loop_register_configfs(void) &tcm_loop_set_default_node_attributes; fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag; fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state; - fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure; fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in; fabric->tf_ops.queue_status = &tcm_loop_queue_status; fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index ac7f7655570e..aac0ee993b90 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -479,10 +479,6 @@ static int target_fabric_tf_ops_check( printk(KERN_ERR "Missing tfo->get_cmd_state()\n"); return -EINVAL; } - if (!(tfo->new_cmd_failure)) { - printk(KERN_ERR "Missing tfo->new_cmd_failure()\n"); - return -EINVAL; - } if (!(tfo->queue_data_in)) { printk(KERN_ERR "Missing tfo->queue_data_in()\n"); return -EINVAL; diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 6667e39a35a1..5c20de3f1d1c 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -356,11 +356,6 @@ int core_tmr_lun_reset( atomic_read(&cmd->t_fe_count)); /* * Signal that the command has failed via cmd->se_cmd_flags, - * and call TFO->new_cmd_failure() to wakeup any fabric - * dependent code used to wait for unsolicited data out - * allocation to complete. The fabric module is expected - * to dump any remaining unsolicited data out for the aborted - * command at this point. */ transport_new_cmd_failure(cmd); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 6f2855dac7f8..e3544c86d1fa 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2586,8 +2586,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd) se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); - - se_cmd->se_tfo->new_cmd_failure(se_cmd); } static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 7b82f1b7fef8..8d26779e440c 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -195,7 +195,6 @@ int ft_write_pending(struct se_cmd *); int ft_write_pending_status(struct se_cmd *); u32 ft_get_task_tag(struct se_cmd *); int ft_get_cmd_state(struct se_cmd *); -void ft_new_cmd_failure(struct se_cmd *); int ft_queue_tm_resp(struct se_cmd *); int ft_is_state_remove(struct se_cmd *); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 910306ce48de..9a3b486e1aad 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -292,12 +292,6 @@ int ft_is_state_remove(struct se_cmd *se_cmd) return 0; /* XXX TBD */ } -void ft_new_cmd_failure(struct se_cmd *se_cmd) -{ - /* XXX TBD */ - printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd); -} - /* * FC sequence response handler for follow-on sequences (data) and aborts. 
*/ diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 58e4745749db..20097728e8a0 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -550,7 +550,6 @@ static struct target_core_fabric_ops ft_fabric_ops = { .set_default_node_attributes = ft_set_default_node_attr, .get_task_tag = ft_get_task_tag, .get_cmd_state = ft_get_cmd_state, - .new_cmd_failure = ft_new_cmd_failure, .queue_data_in = ft_queue_data_in, .queue_status = ft_queue_status, .queue_tm_rsp = ft_queue_tm_resp, diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h index 1752ed3f77fa..eba7201e5609 100644 --- a/include/target/target_core_fabric_ops.h +++ b/include/target/target_core_fabric_ops.h @@ -65,7 +65,6 @@ struct target_core_fabric_ops { void (*set_default_node_attributes)(struct se_node_acl *); u32 (*get_task_tag)(struct se_cmd *); int (*get_cmd_state)(struct se_cmd *); - void (*new_cmd_failure)(struct se_cmd *); int (*queue_data_in)(struct se_cmd *); int (*queue_status)(struct se_cmd *); int (*queue_tm_rsp)(struct se_cmd *); -- cgit v1.2.3 From db1620a2788f6c470804f6a5f983a0152188bd90 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 31 May 2011 17:06:43 -0400 Subject: target: remove the unused SCF_* flags This patch contains a squashed version to remove unused SCF_* flags: target: remove the unused SCF_SE_DISABLE_ONLINE_CHECK flag target: remove the unused SCF_CMD_PASSTHROUGH_NOALLOC flag target: remove the unused SCF_EMULATE_SYNC_UNMAP flag target: remove the unused SCF_EMULATE_SYNC_CACHE flag Signed-off-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 30 +++++------------------------- include/target/target_core_base.h | 4 ---- 2 files changed, 5 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index e3544c86d1fa..43fd2778d602 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2431,14 +2431,12 @@ static int transport_execute_tasks(struct se_cmd *cmd) { int add_tasks; - if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) { - if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { - cmd->transport_error_status = - PYX_TRANSPORT_LU_COMM_FAILURE; - transport_generic_request_failure(cmd, NULL, 0, 1); - return 0; - } + if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) { + cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; + transport_generic_request_failure(cmd, NULL, 0, 1); + return 0; } + /* * Call transport_cmd_check_stop() to see if a fabric exception * has occurred that prevents execution. @@ -3398,18 +3396,6 @@ static int transport_generic_cmd_sequencer( break; case UNMAP: size = get_unaligned_be16(&cdb[7]); - passthrough = (dev->transport->transport_type == - TRANSPORT_PLUGIN_PHBA_PDEV); - /* - * Determine if the received UNMAP used to for direct passthrough - * into Linux/SCSI with struct request via TCM/pSCSI or we are - * signaling the use of internal transport_generic_unmap() emulation - * for UNMAP -> Linux/BLOCK disbard with TCM/IBLOCK and TCM/FILEIO - * subsystem plugin backstores. - */ - if (!(passthrough)) - cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; break; case WRITE_SAME_16: @@ -3760,12 +3746,6 @@ static inline void transport_free_pages(struct se_cmd *cmd) return; } - /* - * Caller will handle releasing of struct se_mem. 
- */
-	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
-		return;
-
 	list_for_each_entry_safe(se_mem, se_mem_tmp,
 			&cmd->t_mem_list, se_list) {
 		/*
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 71c96ce9287e..71abc4c5e2b4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -112,11 +112,9 @@ enum se_cmd_flags_table {
 	SCF_SCSI_NON_DATA_CDB = 0x00000040,
 	SCF_SCSI_CDB_EXCEPTION = 0x00000080,
 	SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
-	SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200,
 	SCF_SE_CMD_FAILED = 0x00000400,
 	SCF_SE_LUN_CMD = 0x00000800,
 	SCF_SE_ALLOW_EOO = 0x00001000,
-	SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000,
 	SCF_SENT_CHECK_CONDITION = 0x00004000,
 	SCF_OVERFLOW_BIT = 0x00008000,
 	SCF_UNDERFLOW_BIT = 0x00010000,
@@ -126,9 +124,7 @@ enum se_cmd_flags_table {
 	SCF_UNUSED = 0x00100000,
 	SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000,
 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
-	SCF_EMULATE_SYNC_CACHE = 0x00800000,
 	SCF_EMULATE_CDB_ASYNC = 0x01000000,
-	SCF_EMULATE_SYNC_UNMAP = 0x02000000
 };

 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
-- cgit v1.2.3

From 35462975b2b197b990fedbb74b81f9bea9d344cb Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 31 May 2011 23:56:57 -0400
Subject: target: merge release_cmd methods

The release_cmd_to_pool and release_cmd_direct methods are always the same.
Merge them into a single release_cmd method, and clean up the fallout.

(nab: fix transport_generic_free_cmd() parameter build breakage in
drivers/target/tcm_fc/tfc_cmd.c)

Signed-off-by: Christoph Hellwig
Signed-off-by: Nicholas Bellinger
---
 drivers/target/loopback/tcm_loop.c | 12 +++-----
 drivers/target/target_core_configfs.c | 8 ++---
 drivers/target/target_core_transport.c | 53 ++++++++++++---------------------
 drivers/target/tcm_fc/tfc_cmd.c | 12 ++++----
 drivers/target/tcm_fc/tfc_conf.c | 3 +-
 include/target/target_core_fabric_ops.h | 3 +-
 include/target/target_core_transport.h | 4 +--
 7 files changed, 35 insertions(+), 60 deletions(-)

(limited to 'include')

diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index cb4a9b906a4b..c2937b2035d3 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -204,13 +204,10 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
 	 * Release the struct se_cmd, which will make a callback to release
 	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
 	 */
-	transport_generic_free_cmd(se_cmd, 0, 1, 0);
+	transport_generic_free_cmd(se_cmd, 0, 0);
 }

-/*
- * Called from struct target_core_fabric_ops->release_cmd_to_pool()
- */
-static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
 {
 	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
 				struct tcm_loop_cmd, tl_se_cmd);
@@ -395,7 +392,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 		SUCCESS : FAILED;
 release:
 	if (se_cmd)
-		transport_generic_free_cmd(se_cmd, 1, 1, 0);
+		transport_generic_free_cmd(se_cmd, 1, 0);
 	else
 		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
 	kfree(tl_tmr);
@@ -1418,8 +1415,7 @@ static int tcm_loop_register_configfs(void)
 	 */
 	fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
 	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
-	fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
-	fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session; fabric->tf_ops.close_session = &tcm_loop_close_session; fabric->tf_ops.stop_session = &tcm_loop_stop_session; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index aac0ee993b90..63cba1e243ef 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -427,12 +427,8 @@ static int target_fabric_tf_ops_check( printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n"); return -EINVAL; } - if (!(tfo->release_cmd_to_pool)) { - printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n"); - return -EINVAL; - } - if (!(tfo->release_cmd_direct)) { - printk(KERN_ERR "Missing tfo->release_cmd_direct()\n"); + if (!tfo->release_cmd) { + printk(KERN_ERR "Missing tfo->release_cmd()\n"); return -EINVAL; } if (!(tfo->shutdown_session)) { diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 43fd2778d602..d42a98e4725b 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -212,7 +212,7 @@ static u32 transport_allocate_tasks(struct se_cmd *cmd, struct list_head *mem_list, int set_counts); static int transport_generic_get_mem(struct se_cmd *cmd, u32 length); static int transport_generic_remove(struct se_cmd *cmd, - int release_to_pool, int session_reinstatement); + int session_reinstatement); static int transport_cmd_get_valid_sectors(struct se_cmd *cmd); static int transport_map_sg_to_mem(struct se_cmd *cmd, struct list_head *se_mem_list, struct scatterlist *sgl); @@ -737,7 +737,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) if (transport_cmd_check_stop_to_fabric(cmd)) return; if (remove) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) @@ -747,7 +747,7 @@ void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) if (transport_cmd_check_stop_to_fabric(cmd)) return; - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } static void transport_add_cmd_to_queue( @@ -2146,7 +2146,7 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) } spin_unlock_irqrestore(&cmd->t_state_lock, flags); - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } static int @@ -3825,13 +3825,11 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) free_pages: transport_free_pages(cmd); transport_free_se_cmd(cmd); - cmd->se_tfo->release_cmd_direct(cmd); + cmd->se_tfo->release_cmd(cmd); } -static int transport_generic_remove( - struct se_cmd *cmd, - int release_to_pool, - int session_reinstatement) +static int +transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) { unsigned long flags; @@ -3858,14 +3856,7 @@ static int transport_generic_remove( free_pages: transport_free_pages(cmd); - - if (release_to_pool) { - transport_release_cmd_to_pool(cmd); - } else { - transport_free_se_cmd(cmd); - cmd->se_tfo->release_cmd_direct(cmd); - } - + transport_release_cmd(cmd); return 0; } @@ -4894,18 +4885,14 @@ static int transport_generic_write_pending(struct se_cmd *cmd) return PYX_TRANSPORT_WRITE_PENDING; } -/* transport_release_cmd_to_pool(): - * - * - */ -void transport_release_cmd_to_pool(struct se_cmd *cmd) +void transport_release_cmd(struct se_cmd *cmd) { BUG_ON(!cmd->se_tfo); transport_free_se_cmd(cmd); - cmd->se_tfo->release_cmd_to_pool(cmd); + cmd->se_tfo->release_cmd(cmd); } -EXPORT_SYMBOL(transport_release_cmd_to_pool); 
+EXPORT_SYMBOL(transport_release_cmd); /* transport_generic_free_cmd(): * @@ -4914,11 +4901,10 @@ EXPORT_SYMBOL(transport_release_cmd_to_pool); void transport_generic_free_cmd( struct se_cmd *cmd, int wait_for_tasks, - int release_to_pool, int session_reinstatement) { if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) - transport_release_cmd_to_pool(cmd); + transport_release_cmd(cmd); else { core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); @@ -4936,8 +4922,7 @@ void transport_generic_free_cmd( transport_free_dev_tasks(cmd); - transport_generic_remove(cmd, release_to_pool, - session_reinstatement); + transport_generic_remove(cmd, session_reinstatement); } } EXPORT_SYMBOL(transport_generic_free_cmd); @@ -5210,7 +5195,7 @@ remove: if (!remove_cmd) return; - transport_generic_free_cmd(cmd, 0, 0, session_reinstatement); + transport_generic_free_cmd(cmd, 0, session_reinstatement); } static int transport_get_sense_codes( @@ -5616,7 +5601,7 @@ static void transport_processing_shutdown(struct se_device *dev) transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -5644,7 +5629,7 @@ static void transport_processing_shutdown(struct se_device *dev) transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } spin_lock_irqsave(&dev->execute_task_lock, flags); @@ -5667,7 +5652,7 @@ static void transport_processing_shutdown(struct se_device *dev) } else { transport_lun_remove_cmd(cmd); if (transport_cmd_check_stop(cmd, 1, 0)) - transport_generic_remove(cmd, 0, 0); + transport_generic_remove(cmd, 0); } } } @@ -5739,10 +5724,10 @@ get_cmd: transport_generic_complete_ok(cmd); break; case TRANSPORT_REMOVE: - transport_generic_remove(cmd, 1, 0); + transport_generic_remove(cmd, 0); break; case TRANSPORT_FREE_CMD_INTR: - transport_generic_free_cmd(cmd, 0, 1, 0); + transport_generic_free_cmd(cmd, 0, 0); break; case TRANSPORT_PROCESS_TMR: transport_generic_do_tmr(cmd); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 9a3b486e1aad..74d5bb766201 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -147,7 +147,7 @@ void ft_release_cmd(struct se_cmd *se_cmd) void ft_check_stop_free(struct se_cmd *se_cmd) { - transport_generic_free_cmd(se_cmd, 0, 1, 0); + transport_generic_free_cmd(se_cmd, 0, 0); } /* @@ -304,7 +304,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) /* XXX need to find cmd if queued */ cmd->se_cmd.t_state = TRANSPORT_REMOVE; cmd->seq = NULL; - transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0, 0); return; } @@ -321,7 +321,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); fc_frame_free(fp); - transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0, 0); break; } } @@ -443,7 +443,7 @@ static void ft_send_tm(struct ft_cmd *cmd) sess = cmd->sess; transport_send_check_condition_and_sense(&cmd->se_cmd, cmd->se_cmd.scsi_sense_reason, 0); - transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0); + transport_generic_free_cmd(&cmd->se_cmd, 0, 0); ft_sess_put(sess); return; } @@ -645,7 +645,7 @@ static void ft_send_cmd(struct ft_cmd *cmd) if (ret == -ENOMEM) { 
transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); - transport_generic_free_cmd(se_cmd, 0, 1, 0); + transport_generic_free_cmd(se_cmd, 0, 0); return; } if (ret == -EINVAL) { @@ -654,7 +654,7 @@ static void ft_send_cmd(struct ft_cmd *cmd) else transport_send_check_condition_and_sense(se_cmd, se_cmd->scsi_sense_reason, 0); - transport_generic_free_cmd(se_cmd, 0, 1, 0); + transport_generic_free_cmd(se_cmd, 0, 0); return; } transport_generic_handle_cdb(se_cmd); diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 20097728e8a0..1cb3d345183f 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -536,8 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = { .tpg_release_fabric_acl = ft_tpg_release_fabric_acl, .tpg_get_inst_index = ft_tpg_get_inst_index, .check_stop_free = ft_check_stop_free, - .release_cmd_to_pool = ft_release_cmd, - .release_cmd_direct = ft_release_cmd, + .release_cmd = ft_release_cmd, .shutdown_session = ft_sess_shutdown, .close_session = ft_sess_close, .stop_session = ft_sess_stop, diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h index eba7201e5609..2de8fe907596 100644 --- a/include/target/target_core_fabric_ops.h +++ b/include/target/target_core_fabric_ops.h @@ -43,8 +43,7 @@ struct target_core_fabric_ops { * I/O descriptor in transport_cmd_check_stop() */ void (*check_stop_free)(struct se_cmd *); - void (*release_cmd_to_pool)(struct se_cmd *); - void (*release_cmd_direct)(struct se_cmd *); + void (*release_cmd)(struct se_cmd *); /* * Called with spin_lock_bh(struct se_portal_group->session_lock held. */ diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index c9846d521945..604e669527b4 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -179,8 +179,8 @@ extern int transport_clear_lun_from_sessions(struct se_lun *); extern int transport_check_aborted_status(struct se_cmd *, int); extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int); extern void transport_send_task_abort(struct se_cmd *); -extern void transport_release_cmd_to_pool(struct se_cmd *); -extern void transport_generic_free_cmd(struct se_cmd *, int, int, int); +extern void transport_release_cmd(struct se_cmd *); +extern void transport_generic_free_cmd(struct se_cmd *, int, int); extern void transport_generic_wait_for_cmds(struct se_cmd *, int); extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32); extern int transport_map_mem_to_sg(struct se_task *, struct list_head *, -- cgit v1.2.3 From 695434e1cbd57f404110bf4ab187a5127ffd79bb Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 3 Jun 2011 20:59:19 -0700 Subject: target: Add transport_handle_cdb_direct optimization This patch adds a transport_handle_cdb_direct() optimization for mapping and queueing tasks directly from within fabric processing context by calling the newly exported transport_generic_new_cmd(). This currently expects to be called from process context only, and will fail if called within interrupt context. This patch also leaves transport_generic_handle_cdb() unmodified for the moment to function as expected with existing tcm_fc and ib_srpt fabrics, and will be removed once these have been converted and tested with v4.1 code using transport_handle_cdb_direct(). 
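As a rough usage sketch (not part of this patch): a fabric module running in
process context could submit a newly received command through the new entry
point roughly as follows. The my_fabric_* names are hypothetical; only
transport_generic_allocate_tasks() and transport_handle_cdb_direct() are
interfaces from this series.

/*
 * Hypothetical fabric submission path (sketch only).  Must run in
 * process context, since transport_handle_cdb_direct() rejects
 * callers in interrupt context.
 */
static int my_fabric_submit_cmd(struct se_cmd *se_cmd, unsigned char *cdb)
{
	int ret;

	/* Setup the se_cmd descriptor state from the received CDB */
	ret = transport_generic_allocate_tasks(se_cmd, cdb);
	if (ret < 0)
		return ret;

	/*
	 * Map and queue tasks immediately instead of deferring to the
	 * processing thread via transport_generic_handle_cdb().
	 */
	return transport_handle_cdb_direct(se_cmd);
}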
Based on Andy's original patch here:

[PATCH 39/42] target: Call transport_new_cmd instead of adding to cmd queue

Signed-off-by: Nicholas Bellinger
---
 drivers/target/target_core_transport.c | 24 ++++++++++++++++++++++++
 include/target/target_core_transport.h | 1 +
 2 files changed, 25 insertions(+)

(limited to 'include')

diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index d42a98e4725b..1ae6eb7a621b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1842,11 +1842,35 @@ int transport_generic_handle_cdb(
 		printk(KERN_ERR "cmd->se_lun is NULL\n");
 		return -EINVAL;
 	}
+
 	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
 	return 0;
 }
 EXPORT_SYMBOL(transport_generic_handle_cdb);

+/*
+ * Used by fabric module frontends to queue tasks directly.
+ * May only be used from process context.
+ */
+int transport_handle_cdb_direct(
+	struct se_cmd *cmd)
+{
+	if (!cmd->se_lun) {
+		dump_stack();
+		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		return -EINVAL;
+	}
+	if (in_interrupt()) {
+		dump_stack();
+		printk(KERN_ERR "transport_generic_handle_cdb cannot be called"
+				" from interrupt context\n");
+		return -EINVAL;
+	}
+
+	return transport_generic_new_cmd(cmd);
+}
+EXPORT_SYMBOL(transport_handle_cdb_direct);
+
 /*
  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
  * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 604e669527b4..2aae76412377 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -166,6 +166,7 @@ extern void transport_init_se_cmd(struct se_cmd *,
 extern void transport_free_se_cmd(struct se_cmd *);
 extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
 extern int transport_generic_handle_cdb(struct se_cmd *);
+extern int transport_handle_cdb_direct(struct se_cmd *);
 extern int transport_generic_handle_cdb_map(struct se_cmd *);
 extern int transport_generic_handle_data(struct se_cmd *);
 extern void transport_new_cmd_failure(struct se_cmd *);
-- cgit v1.2.3

From 07bde79a5c355dbca66ca4318645aa17b4c0d859 Mon Sep 17 00:00:00 2001
From: Nicholas Bellinger
Date: Mon, 13 Jun 2011 14:46:09 -0700
Subject: target: Add SCF_EMULATE_QUEUE_FULL -> transport_handle_queue_full

This patch adds SCF_EMULATE_QUEUE_FULL support using -EAGAIN failures
via transport_handle_queue_full() to signal queue full in completion
path TFO->queue_data_in() and TFO->queue_status() callbacks.

This is done using a new se_cmd->transport_qf_callback() to handle the
following queue full exception cases within target core:

*) TRANSPORT_COMPLETE_OK (for completion path queue full)

*) TRANSPORT_COMPLETE_QF_WP (for TRANSPORT_WRITE_PENDING queue full)

*) transport_send_check_condition_and_sense() failure paths in
   transport_generic_request_failure() and transport_generic_complete_ok()

All logic is driven using se_device->qf_work_queue -> target_qf_do_work()
to requeue outstanding se_cmd at the head of se_dev->queue_obj->qobj_list
for transport_processing_thread() execution.

Tested using tcm_qla2xxx with MAX_OUTSTANDING_COMMANDS=128 for FCP READ
to trigger the TRANSPORT_COMPLETE_OK queue full cases, and a simulated
TFO->write_pending() -EAGAIN failure to trigger TRANSPORT_COMPLETE_QF_WP.
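As a rough illustration (not part of this patch) of the fabric-side contract:
a completion callback signals queue full simply by returning -EAGAIN, and
target core requeues the se_cmd and retries it later from target_qf_do_work().
The my_fabric_* names below are hypothetical.

/*
 * Hypothetical TFO->queue_data_in() callback (sketch only).  The
 * hardware-ring test is illustrative; returning -EAGAIN is the only
 * part of the contract this patch defines.
 */
static int my_fabric_queue_data_in(struct se_cmd *se_cmd)
{
	if (my_fabric_response_ring_full(se_cmd))
		return -EAGAIN;	/* core schedules a qf_work_queue retry */

	return my_fabric_send_data_frames(se_cmd);
}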
Reported-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 194 ++++++++++++++++++++++++++++++--- include/target/target_core_base.h | 9 ++ 2 files changed, 188 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 1ae6eb7a621b..056c4cb4736d 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -204,6 +204,9 @@ static int transport_generic_write_pending(struct se_cmd *); static int transport_processing_thread(void *param); static int __transport_execute_tasks(struct se_device *dev); static void transport_complete_task_attr(struct se_cmd *cmd); +static int transport_complete_qf(struct se_cmd *cmd); +static void transport_handle_queue_full(struct se_cmd *cmd, + struct se_device *dev, int (*qf_callback)(struct se_cmd *)); static void transport_direct_request_timeout(struct se_cmd *cmd); static void transport_free_dev_tasks(struct se_cmd *cmd); static u32 transport_allocate_tasks(struct se_cmd *cmd, @@ -768,7 +771,11 @@ static void transport_add_cmd_to_queue( } spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); + if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { + cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; + list_add(&cmd->se_queue_node, &qobj->qobj_list); + } else + list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); atomic_inc(&cmd->t_transport_queue_active); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); @@ -1102,6 +1109,40 @@ void transport_remove_task_from_execute_queue( spin_unlock_irqrestore(&dev->execute_task_lock, flags); } +/* + * Handle QUEUE_FULL / -EAGAIN status + */ + +static void target_qf_do_work(struct work_struct *work) +{ + struct se_device *dev = container_of(work, struct se_device, + qf_work_queue); + struct se_cmd *cmd, *cmd_tmp; + + spin_lock_irq(&dev->qf_cmd_lock); + list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { + + list_del(&cmd->se_qf_node); + atomic_dec(&dev->dev_qf_count); + smp_mb__after_atomic_dec(); + spin_unlock_irq(&dev->qf_cmd_lock); + + printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue" + " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, + (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : + (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" + : "UNKNOWN"); + /* + * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd + * has been added to head of queue + */ + transport_add_cmd_to_queue(cmd, cmd->t_state); + + spin_lock_irq(&dev->qf_cmd_lock); + } + spin_unlock_irq(&dev->qf_cmd_lock); +} + unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) { switch (cmd->data_direction) { @@ -1531,6 +1572,7 @@ struct se_device *transport_add_device_to_core_hba( INIT_LIST_HEAD(&dev->delayed_cmd_list); INIT_LIST_HEAD(&dev->ordered_cmd_list); INIT_LIST_HEAD(&dev->state_task_list); + INIT_LIST_HEAD(&dev->qf_cmd_list); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); spin_lock_init(&dev->ordered_cmd_lock); @@ -1541,6 +1583,7 @@ struct se_device *transport_add_device_to_core_hba( spin_lock_init(&dev->dev_status_thr_lock); spin_lock_init(&dev->se_port_lock); spin_lock_init(&dev->se_tmr_lock); + spin_lock_init(&dev->qf_cmd_lock); dev->queue_depth = dev_limits->queue_depth; atomic_set(&dev->depth_left, dev->queue_depth); @@ -1584,7 +1627,10 @@ struct se_device *transport_add_device_to_core_hba( dev->transport->name); goto out; } - + /* + * Setup work_queue for QUEUE_FULL + */ + INIT_WORK(&dev->qf_work_queue, target_qf_do_work); /* * Preload the initial INQUIRY const values if we are doing * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI @@ -1697,6 +1743,7 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_lun_node); INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_ordered_node); + INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->t_mem_list); INIT_LIST_HEAD(&cmd->t_mem_bidi_list); @@ -2019,6 +2066,8 @@ static void transport_generic_request_failure( int complete, int sc) { + int ret = 0; + DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), cmd->t_task_cdb[0]); @@ -2109,7 +2158,9 @@ static void transport_generic_request_failure( cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); - cmd->se_tfo->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret == -EAGAIN) + goto queue_full; goto check_stop; case PYX_TRANSPORT_USE_SENSE_REASON: /* @@ -2126,13 +2177,22 @@ static void transport_generic_request_failure( if (!sc) transport_new_cmd_failure(cmd); - else - transport_send_check_condition_and_sense(cmd, - cmd->scsi_sense_reason, 0); + else { + ret = transport_send_check_condition_and_sense(cmd, + cmd->scsi_sense_reason, 0); + if (ret == -EAGAIN) + goto queue_full; + } + check_stop: transport_lun_remove_cmd(cmd); if (!(transport_cmd_check_stop_to_fabric(cmd))) ; + return; + +queue_full: + cmd->t_state = TRANSPORT_COMPLETE_OK; + transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); } static void transport_direct_request_timeout(struct se_cmd *cmd) @@ -3637,9 +3697,53 @@ static void transport_complete_task_attr(struct se_cmd *cmd) wake_up_interruptible(&dev->dev_queue_obj.thread_wq); } +static int transport_complete_qf(struct se_cmd *cmd) +{ + int ret = 0; + + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) + return cmd->se_tfo->queue_status(cmd); + + switch (cmd->data_direction) { + case DMA_FROM_DEVICE: + ret = cmd->se_tfo->queue_data_in(cmd); + break; + case DMA_TO_DEVICE: + if (!list_empty(&cmd->t_mem_bidi_list)) { + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret < 0) + return ret; + } + /* Fall through for DMA_TO_DEVICE */ + case DMA_NONE: + ret = cmd->se_tfo->queue_status(cmd); + break; + default: + break; + } + + return 
ret; +} + +static void transport_handle_queue_full( + struct se_cmd *cmd, + struct se_device *dev, + int (*qf_callback)(struct se_cmd *)) +{ + spin_lock_irq(&dev->qf_cmd_lock); + cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; + cmd->transport_qf_callback = qf_callback; + list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); + atomic_inc(&dev->dev_qf_count); + smp_mb__after_atomic_inc(); + spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); + + schedule_work(&cmd->se_dev->qf_work_queue); +} + static void transport_generic_complete_ok(struct se_cmd *cmd) { - int reason = 0; + int reason = 0, ret; /* * Check if we need to move delayed/dormant tasks from cmds on the * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task @@ -3647,6 +3751,21 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) */ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) transport_complete_task_attr(cmd); + /* + * Check to schedule QUEUE_FULL work, or execute an existing + * cmd->transport_qf_callback() + */ + if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) + schedule_work(&cmd->se_dev->qf_work_queue); + + if (cmd->transport_qf_callback) { + ret = cmd->transport_qf_callback(cmd); + if (ret < 0) + goto queue_full; + + cmd->transport_qf_callback = NULL; + goto done; + } /* * Check if we need to retrieve a sense buffer from * the struct se_cmd in question. @@ -3660,8 +3779,11 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) * a non GOOD status. */ if (cmd->scsi_status) { - transport_send_check_condition_and_sense( + ret = transport_send_check_condition_and_sense( cmd, reason, 1); + if (ret == -EAGAIN) + goto queue_full; + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; @@ -3693,7 +3815,9 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) cmd->t_task_buf, cmd->data_length); - cmd->se_tfo->queue_data_in(cmd); + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret == -EAGAIN) + goto queue_full; break; case DMA_TO_DEVICE: spin_lock(&cmd->se_lun->lun_sep_lock); @@ -3712,19 +3836,30 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); - cmd->se_tfo->queue_data_in(cmd); + ret = cmd->se_tfo->queue_data_in(cmd); + if (ret == -EAGAIN) + goto queue_full; break; } /* Fall through for DMA_TO_DEVICE */ case DMA_NONE: - cmd->se_tfo->queue_status(cmd); + ret = cmd->se_tfo->queue_status(cmd); + if (ret == -EAGAIN) + goto queue_full; break; default: break; } +done: transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); + return; + +queue_full: + printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p," + " data_direction: %d\n", cmd, cmd->data_direction); + transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); } static void transport_free_dev_tasks(struct se_cmd *cmd) @@ -4866,6 +5001,11 @@ void transport_generic_process_write(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_generic_process_write); +static int transport_write_pending_qf(struct se_cmd *cmd) +{ + return cmd->se_tfo->write_pending(cmd); +} + /* transport_generic_write_pending(): * * @@ -4878,6 +5018,17 @@ static int transport_generic_write_pending(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = TRANSPORT_WRITE_PENDING; spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (cmd->transport_qf_callback) { + ret = cmd->transport_qf_callback(cmd); + if (ret == -EAGAIN) + goto queue_full; + else if (ret < 0) + return ret; + + cmd->transport_qf_callback 
= NULL; + return 0; + } /* * For the TCM control CDBs using a contiguous buffer, do the memcpy * from the passed Linux/SCSI struct scatterlist located at @@ -4903,10 +5054,19 @@ static int transport_generic_write_pending(struct se_cmd *cmd) * frontend know that WRITE buffers are ready. */ ret = cmd->se_tfo->write_pending(cmd); - if (ret < 0) + if (ret == -EAGAIN) + goto queue_full; + else if (ret < 0) return ret; return PYX_TRANSPORT_WRITE_PENDING; + +queue_full: + printk(KERN_INFO "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); + cmd->t_state = TRANSPORT_COMPLETE_QF_WP; + transport_handle_queue_full(cmd, cmd->se_dev, + transport_write_pending_qf); + return ret; } void transport_release_cmd(struct se_cmd *cmd) @@ -5410,8 +5570,7 @@ int transport_send_check_condition_and_sense( cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; after_reason: - cmd->se_tfo->queue_status(cmd); - return 0; + return cmd->se_tfo->queue_status(cmd); } EXPORT_SYMBOL(transport_send_check_condition_and_sense); @@ -5733,7 +5892,9 @@ get_cmd: /* Fall through */ case TRANSPORT_NEW_CMD: ret = transport_generic_new_cmd(cmd); - if (ret < 0) { + if (ret == -EAGAIN) + break; + else if (ret < 0) { cmd->transport_error_status = ret; transport_generic_request_failure(cmd, NULL, 0, (cmd->data_direction != @@ -5763,6 +5924,9 @@ get_cmd: transport_stop_all_task_timers(cmd); transport_generic_request_timeout(cmd); break; + case TRANSPORT_COMPLETE_QF_WP: + transport_generic_write_pending(cmd); + break; default: printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" " %d for ITT: 0x%08x i_state: %d on SE LUN:" diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 71abc4c5e2b4..cd163dd94cd4 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -99,6 +99,7 @@ enum transport_state_table { TRANSPORT_FREE = 15, TRANSPORT_NEW_CMD_MAP = 16, TRANSPORT_FREE_CMD_INTR = 17, + TRANSPORT_COMPLETE_QF_WP = 18, }; /* Used for struct se_cmd->se_cmd_flags */ @@ -125,6 +126,7 @@ enum se_cmd_flags_table { SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, SCF_EMULATE_CDB_ASYNC = 0x01000000, + SCF_EMULATE_QUEUE_FULL = 0x02000000, }; /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ @@ -466,6 +468,7 @@ struct se_cmd { struct list_head se_delayed_node; struct list_head se_ordered_node; struct list_head se_lun_node; + struct list_head se_qf_node; struct se_device *se_dev; struct se_dev_entry *se_deve; struct se_device *se_obj_ptr; @@ -480,6 +483,8 @@ struct se_cmd { void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); void (*transport_wait_for_tasks)(struct se_cmd *, int, int); void (*transport_complete_callback)(struct se_cmd *); + int (*transport_qf_callback)(struct se_cmd *); + unsigned char *t_task_cdb; unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE]; unsigned long long t_task_lba; @@ -743,6 +748,7 @@ struct se_device { atomic_t dev_status_thr_count; atomic_t dev_hoq_count; atomic_t dev_ordered_sync; + atomic_t dev_qf_count; struct se_obj dev_obj; struct se_obj dev_access_obj; struct se_obj dev_export_obj; @@ -758,6 +764,7 @@ struct se_device { spinlock_t dev_status_thr_lock; spinlock_t se_port_lock; spinlock_t se_tmr_lock; + spinlock_t qf_cmd_lock; /* Used for legacy SPC-2 reservationsa */ struct se_node_acl *dev_reserved_node_acl; /* Used for ALUA Logical Unit Group membership */ @@ -771,10 +778,12 @@ struct se_device { struct task_struct *process_thread; pid_t process_thread_pid; 
struct task_struct *dev_mgmt_thread; + struct work_struct qf_work_queue; struct list_head delayed_cmd_list; struct list_head ordered_cmd_list; struct list_head execute_task_list; struct list_head state_task_list; + struct list_head qf_cmd_list; /* Pointer to associated SE HBA */ struct se_hba *se_hba; struct se_subsystem_dev *se_sub_dev; -- cgit v1.2.3 From e22a7f075226c51f3f71b922e9eeb4f99fac1475 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 5 Jul 2011 13:34:52 -0700 Subject: target: Implement Block Device Characteristics VPD page Implement page B1h, Block Device Characteristics, so that we can report a medium rotation rate of 1 (non-rotating / solid state) if the is_nonrot device attribute is set; we update the iblock backend to set this attribute if the underlying Linux block device has its nonrot flag set. Signed-off-by: Roland Dreier Reviewed-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_cdb.c | 17 +++++++++++++++++ drivers/target/target_core_configfs.c | 4 ++++ drivers/target/target_core_device.c | 13 +++++++++++++ drivers/target/target_core_iblock.c | 3 +++ include/target/target_core_base.h | 1 + include/target/target_core_device.h | 1 + include/target/target_core_transport.h | 2 ++ 7 files changed, 41 insertions(+) (limited to 'include') diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 1157e0c6dba6..432253034de0 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -535,6 +535,22 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) return 0; } +/* Block Device Characteristics VPD page */ +static int +target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf) +{ + struct se_device *dev = cmd->se_dev; + + buf[0] = dev->transport->get_device_type(dev); + buf[3] = 0x3c; + + if (cmd->data_length >= 5 && + dev->se_sub_dev->se_dev_attrib.is_nonrot) + buf[5] = 1; + + return 0; +} + /* Thin Provisioning VPD */ static int target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) @@ -599,6 +615,7 @@ static struct { { .page = 0x83, .emulate = target_emulate_evpd_83 }, { .page = 0x86, .emulate = target_emulate_evpd_86 }, { .page = 0xb0, .emulate = target_emulate_evpd_b0 }, + { .page = 0xb1, .emulate = target_emulate_evpd_b1 }, { .page = 0xb2, .emulate = target_emulate_evpd_b2 }, }; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 8d2aba51fc8b..6b00810b8dcb 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -698,6 +698,9 @@ SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(enforce_pr_isids); SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); +DEF_DEV_ATTRIB(is_nonrot); +SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); + DEF_DEV_ATTRIB_RO(hw_block_size); SE_DEV_ATTR_RO(hw_block_size); @@ -746,6 +749,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_emulate_tpu.attr, &target_core_dev_attrib_emulate_tpws.attr, &target_core_dev_attrib_enforce_pr_isids.attr, + &target_core_dev_attrib_is_nonrot.attr, &target_core_dev_attrib_hw_block_size.attr, &target_core_dev_attrib_block_size.attr, &target_core_dev_attrib_hw_max_sectors.attr, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index f13e2941936c..440e6b69d47b 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -853,6 +853,7 @@ void se_dev_set_default_attribs( 
dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS; dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; + dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT; /* * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK * iblock_create_virtdevice() from struct queue_limits values @@ -1117,6 +1118,18 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) return 0; } +int se_dev_set_is_nonrot(struct se_device *dev, int flag) +{ + if ((flag != 0) && (flag != 1)) { + printk(KERN_ERR "Illegal value %d\n", flag); + return -EINVAL; + } + dev->se_sub_dev->se_dev_attrib.is_nonrot = flag; + printk(KERN_INFO "dev[%p]: SE Device is_nonrot bit: %d\n", + dev, flag); + return 0; +} + /* * Note, this can only be called on unexported SE Device Object. */ diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 160c484fd3da..392e75fb1087 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -196,6 +196,9 @@ static struct se_device *iblock_create_virtdevice( " disabled by default\n"); } + if (blk_queue_nonrot(q)) + dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; + return dev; failed: diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index cd163dd94cd4..81deb399bf6a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -661,6 +661,7 @@ struct se_dev_attrib { int emulate_reservations; int emulate_alua; int enforce_pr_isids; + int is_nonrot; u32 hw_block_size; u32 block_size; u32 hw_max_sectors; diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h index 96586cc94984..f3b6ae655454 100644 --- a/include/target/target_core_device.h +++ b/include/target/target_core_device.h @@ -39,6 +39,7 @@ extern int se_dev_set_emulate_tas(struct se_device *, int); extern int se_dev_set_emulate_tpu(struct se_device *, int); extern int se_dev_set_emulate_tpws(struct se_device *, int); extern int se_dev_set_enforce_pr_isids(struct se_device *, int); +extern int se_dev_set_is_nonrot(struct se_device *, int); extern int se_dev_set_queue_depth(struct se_device *, u32); extern int se_dev_set_max_sectors(struct se_device *, u32); extern int se_dev_set_optimal_sectors(struct se_device *, u32); diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 2aae76412377..b27ce1af698b 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -101,6 +101,8 @@ #define DA_ENFORCE_PR_ISIDS 1 #define DA_STATUS_MAX_SECTORS_MIN 16 #define DA_STATUS_MAX_SECTORS_MAX 8192 +/* By default don't report non-rotating (solid state) medium */ +#define DA_IS_NONROT 0 #define SE_MODE_PAGE_BUF 512 -- cgit v1.2.3 From 05d1c7c0d0db4cc25548d9aadebb416888a82327 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Wed, 20 Jul 2011 19:13:28 +0000 Subject: target: Make all control CDBs scatter-gather Previously, some control CDBs did not allocate memory in pages for their data buffer, but just did a kmalloc. This patch makes all cdbs allocate pages. This has the benefit of streamlining some paths that had to behave differently when we used two allocation methods. The downside is that all accesses to the data buffer need to kmap it before use, and need to handle data in page-sized chunks if more than a page is needed for a given command's data buffer. 
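As a rough illustration (not part of this patch) of the access pattern
described above, an emulation handler now brackets buffer access with the
kmap helpers introduced here; the handler name and payload bytes below are
hypothetical.

/*
 * Hypothetical control-CDB emulation handler (sketch only), showing the
 * transport_kmap_first_data_page()/transport_kunmap_first_data_page()
 * pattern this patch applies throughout target core.  Handlers needing
 * more than one page must walk the buffer in page-sized chunks.
 */
static int my_emulate_small_response(struct se_cmd *cmd)
{
	unsigned char *buf;

	/* Buffer is now page-backed: map it before use (was cmd->t_task_buf) */
	buf = transport_kmap_first_data_page(cmd);

	buf[0] = 0x00;	/* illustrative payload bytes */
	buf[3] = 0x04;

	transport_kunmap_first_data_page(cmd);
	return 0;
}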
Finally, note that cdbs with no data buffers are handled a little differently. Before, SCSI_NON_DATA_CDBs would not call get_mem at all (they'd be in the final else in transport_allocate_resources) but now these will make it into generic_get_mem, but just not allocate any buffers. Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_alua.c | 42 ++++++--- drivers/target/target_core_cdb.c | 69 ++++++++++---- drivers/target/target_core_device.c | 5 +- drivers/target/target_core_pr.c | 60 ++++++++++--- drivers/target/target_core_pscsi.c | 32 +------ drivers/target/target_core_transport.c | 159 ++++++++++++--------------------- drivers/target/tcm_fc/tfc_cmd.c | 4 +- drivers/target/tcm_fc/tfc_io.c | 61 ++++--------- include/target/target_core_base.h | 5 +- include/target/target_core_transport.h | 6 +- 10 files changed, 218 insertions(+), 225 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 76d506fe99e0..dba412ff3718 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -65,10 +65,12 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) struct se_port *port; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)cmd->t_task_buf; + unsigned char *buf; u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first Target port group descriptor */ + buf = transport_kmap_first_data_page(cmd); + spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { @@ -141,6 +143,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd) buf[2] = ((rd_len >> 8) & 0xff); buf[3] = (rd_len & 0xff); + transport_kunmap_first_data_page(cmd); + return 0; } @@ -157,14 +161,17 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) struct se_node_acl *nacl = cmd->se_sess->se_node_acl; struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem; - unsigned char *buf = (unsigned char *)cmd->t_task_buf; - unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */ + unsigned char *buf; + unsigned char *ptr; u32 len = 4; /* Skip over RESERVED area in header */ int alua_access_state, primary = 0, rc; u16 tg_pt_id, rtpi; if (!(l_port)) return PYX_TRANSPORT_LU_COMM_FAILURE; + + buf = transport_kmap_first_data_page(cmd); + /* * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed * for the local tg_pt_gp. 
@@ -172,14 +179,16 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; if (!(l_tg_pt_gp_mem)) { printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + goto out; } spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; if (!(l_tg_pt_gp)) { spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + goto out; } rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); @@ -187,9 +196,12 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) if (!(rc)) { printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS" " while TPGS_EXPLICT_ALUA is disabled\n"); - return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; + goto out; } + ptr = &buf[4]; /* Skip over RESERVED area in header */ + while (len < cmd->data_length) { alua_access_state = (ptr[0] & 0x0f); /* @@ -209,7 +221,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) * REQUEST, and the additional sense code set to INVALID * FIELD IN PARAMETER LIST. */ - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; } rc = -1; /* @@ -260,8 +273,10 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) * If not matching target port group ID can be located * throw an exception with ASCQ: INVALID_PARAMETER_LIST */ - if (rc != 0) - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + if (rc != 0) { + rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; + } } else { /* * Extact the RELATIVE TARGET PORT IDENTIFIER to identify @@ -295,14 +310,19 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) * be located, throw an exception with ASCQ: * INVALID_PARAMETER_LIST */ - if (rc != 0) - return PYX_TRANSPORT_INVALID_PARAMETER_LIST; + if (rc != 0) { + rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST; + goto out; + } } ptr += 4; len += 4; } +out: + transport_kunmap_first_data_page(cmd); + return 0; } diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 432253034de0..418282d926fa 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -66,7 +66,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task_buf; + unsigned char *buf; /* * Make sure we at least have 6 bytes of INQUIRY response @@ -78,6 +78,8 @@ target_emulate_inquiry_std(struct se_cmd *cmd) return -EINVAL; } + buf = transport_kmap_first_data_page(cmd); + buf[0] = dev->transport->get_device_type(dev); if (buf[0] == TYPE_TAPE) buf[1] = 0x80; @@ -91,7 +93,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) if (cmd->data_length < 8) { buf[4] = 1; /* Set additional length to 1 */ - return 0; + goto out; } buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ @@ -102,7 +104,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) */ if (cmd->data_length < 36) { buf[4] = 3; /* Set additional length to 3 */ - return 0; + goto out; } snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); @@ -111,6 +113,9 @@ target_emulate_inquiry_std(struct se_cmd *cmd) snprintf((unsigned char *)&buf[32], 4, "%s", &dev->se_sub_dev->t10_wwn.revision[0]); buf[4] = 31; /* Set additional length to 31 */ + 
+out: + transport_kunmap_first_data_page(cmd); return 0; } @@ -647,9 +652,9 @@ static int target_emulate_inquiry(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task_buf; + unsigned char *buf; unsigned char *cdb = cmd->t_task_cdb; - int p; + int p, ret; if (!(cdb[1] & 0x1)) return target_emulate_inquiry_std(cmd); @@ -666,14 +671,20 @@ target_emulate_inquiry(struct se_cmd *cmd) " too small for EVPD=1\n", cmd->data_length); return -EINVAL; } + + buf = transport_kmap_first_data_page(cmd); + buf[0] = dev->transport->get_device_type(dev); for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) if (cdb[2] == evpd_handlers[p].page) { buf[1] = cdb[2]; - return evpd_handlers[p].emulate(cmd, buf); + ret = evpd_handlers[p].emulate(cmd, buf); + transport_kunmap_first_data_page(cmd); + return ret; } + transport_kunmap_first_data_page(cmd); printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); return -EINVAL; } @@ -682,7 +693,7 @@ static int target_emulate_readcapacity(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task_buf; + unsigned char *buf; unsigned long long blocks_long = dev->transport->get_blocks(dev); u32 blocks; @@ -691,6 +702,8 @@ target_emulate_readcapacity(struct se_cmd *cmd) else blocks = (u32)blocks_long; + buf = transport_kmap_first_data_page(cmd); + buf[0] = (blocks >> 24) & 0xff; buf[1] = (blocks >> 16) & 0xff; buf[2] = (blocks >> 8) & 0xff; @@ -705,6 +718,8 @@ target_emulate_readcapacity(struct se_cmd *cmd) if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) put_unaligned_be32(0xFFFFFFFF, &buf[0]); + transport_kunmap_first_data_page(cmd); + return 0; } @@ -712,9 +727,11 @@ static int target_emulate_readcapacity_16(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task_buf; + unsigned char *buf; unsigned long long blocks = dev->transport->get_blocks(dev); + buf = transport_kmap_first_data_page(cmd); + buf[0] = (blocks >> 56) & 0xff; buf[1] = (blocks >> 48) & 0xff; buf[2] = (blocks >> 40) & 0xff; @@ -734,6 +751,8 @@ target_emulate_readcapacity_16(struct se_cmd *cmd) if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) buf[14] = 0x80; + transport_kunmap_first_data_page(cmd); + return 0; } @@ -848,7 +867,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) { struct se_device *dev = cmd->se_dev; char *cdb = cmd->t_task_cdb; - unsigned char *rbuf = cmd->t_task_buf; + unsigned char *rbuf; int type = dev->transport->get_device_type(dev); int offset = (ten) ? 
8 : 4; int length = 0; @@ -911,7 +930,10 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) if ((offset + 1) > cmd->data_length) offset = cmd->data_length; } + + rbuf = transport_kmap_first_data_page(cmd); memcpy(rbuf, buf, offset); + transport_kunmap_first_data_page(cmd); return 0; } @@ -920,14 +942,18 @@ static int target_emulate_request_sense(struct se_cmd *cmd) { unsigned char *cdb = cmd->t_task_cdb; - unsigned char *buf = cmd->t_task_buf; + unsigned char *buf; u8 ua_asc = 0, ua_ascq = 0; + int err = 0; if (cdb[1] & 0x01) { printk(KERN_ERR "REQUEST_SENSE description emulation not" " supported\n"); return PYX_TRANSPORT_INVALID_CDB_FIELD; } + + buf = transport_kmap_first_data_page(cmd); + if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { /* * CURRENT ERROR, UNIT ATTENTION @@ -940,7 +966,8 @@ target_emulate_request_sense(struct se_cmd *cmd) */ if (cmd->data_length <= 18) { buf[7] = 0x00; - return 0; + err = -EINVAL; + goto end; } /* * The Additional Sense Code (ASC) from the UNIT ATTENTION @@ -960,7 +987,8 @@ target_emulate_request_sense(struct se_cmd *cmd) */ if (cmd->data_length <= 18) { buf[7] = 0x00; - return 0; + err = -EINVAL; + goto end; } /* * NO ADDITIONAL SENSE INFORMATION @@ -969,6 +997,9 @@ target_emulate_request_sense(struct se_cmd *cmd) buf[7] = 0x0A; } +end: + transport_kunmap_first_data_page(cmd); + return 0; } @@ -981,11 +1012,11 @@ target_emulate_unmap(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; - unsigned char *buf = cmd->t_task_buf, *ptr = NULL; + unsigned char *buf, *ptr = NULL; unsigned char *cdb = &cmd->t_task_cdb[0]; sector_t lba; unsigned int size = cmd->data_length, range; - int ret, offset; + int ret = 0, offset; unsigned short dl, bd_dl; /* First UNMAP block descriptor starts at 8 byte offset */ @@ -993,6 +1024,9 @@ target_emulate_unmap(struct se_task *task) size -= 8; dl = get_unaligned_be16(&cdb[0]); bd_dl = get_unaligned_be16(&cdb[2]); + + buf = transport_kmap_first_data_page(cmd); + ptr = &buf[offset]; printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); @@ -1007,7 +1041,7 @@ target_emulate_unmap(struct se_task *task) if (ret < 0) { printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", ret); - return ret; + goto err; } ptr += 16; @@ -1016,7 +1050,10 @@ target_emulate_unmap(struct se_task *task) task->task_scsi_status = GOOD; transport_complete_task(task, 1); - return 0; +err: + transport_kunmap_first_data_page(cmd); + + return ret; } /* diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 440e6b69d47b..1185c3b76d47 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -657,7 +657,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) struct se_lun *se_lun; struct se_session *se_sess = se_cmd->se_sess; struct se_task *se_task; - unsigned char *buf = se_cmd->t_task_buf; + unsigned char *buf; u32 cdb_offset = 0, lun_count = 0, offset = 8, i; list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) @@ -668,6 +668,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) return PYX_TRANSPORT_LU_COMM_FAILURE; } + buf = transport_kmap_first_data_page(se_cmd); + /* * If no struct se_session pointer is present, this struct se_cmd is * coming via a target_core_mod PASSTHROUGH op, and not through @@ -704,6 +706,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) * See SPC3 r07, page 159. 
*/ done: + transport_kunmap_first_data_page(se_cmd); lun_count *= 8; buf[0] = ((lun_count >> 24) & 0xff); buf[1] = ((lun_count >> 16) & 0xff); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 4fdede8da0c6..3342843f8619 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port( struct list_head tid_dest_list; struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; struct target_core_fabric_ops *tmp_tf_ops; - unsigned char *buf = (unsigned char *)cmd->t_task_buf; + unsigned char *buf; unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tpdl, tid_len = 0; @@ -1524,6 +1524,8 @@ static int core_scsi3_decode_spec_i_port( */ tidh_new->dest_local_nexus = 1; list_add_tail(&tidh_new->dest_list, &tid_dest_list); + + buf = transport_kmap_first_data_page(cmd); /* * For a PERSISTENT RESERVE OUT specify initiator ports payload, * first extract TransportID Parameter Data Length, and make sure @@ -1760,6 +1762,9 @@ static int core_scsi3_decode_spec_i_port( tid_len = 0; } + + transport_kunmap_first_data_page(cmd); + /* * Go ahead and create a registrations from tid_dest_list for the * SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl @@ -1806,6 +1811,7 @@ static int core_scsi3_decode_spec_i_port( return 0; out: + transport_kunmap_first_data_page(cmd); /* * For the failure case, release everything from tid_dest_list * including *dest_pr_reg and the configfs dependances.. @@ -3307,7 +3313,7 @@ static int core_scsi3_emulate_pro_register_and_move( struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops; struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg; struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task_buf; + unsigned char *buf; unsigned char *initiator_str; char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN]; u32 tid_len, tmp_tid_len; @@ -3357,17 +3363,21 @@ static int core_scsi3_emulate_pro_register_and_move( core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } + /* * Determine the Relative Target Port Identifier where the reservation * will be moved to for the TransportID containing SCSI initiator WWN * information. */ + buf = transport_kmap_first_data_page(cmd); rtpi = (buf[18] & 0xff) << 8; rtpi |= buf[19] & 0xff; tid_len = (buf[20] & 0xff) << 24; tid_len |= (buf[21] & 0xff) << 16; tid_len |= (buf[22] & 0xff) << 8; tid_len |= buf[23] & 0xff; + transport_kunmap_first_data_page(cmd); + buf = NULL; if ((tid_len + 24) != cmd->data_length) { printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header" @@ -3414,6 +3424,8 @@ static int core_scsi3_emulate_pro_register_and_move( core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } + + buf = transport_kmap_first_data_page(cmd); proto_ident = (buf[24] & 0x0f); #if 0 printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" @@ -3444,6 +3456,9 @@ static int core_scsi3_emulate_pro_register_and_move( goto out; } + transport_kunmap_first_data_page(cmd); + buf = NULL; + printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s" " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? "port" : "device", initiator_str, (iport_ptr != NULL) ? 
@@ -3696,9 +3711,13 @@ after_iport_check: " REGISTER_AND_MOVE\n"); } + transport_kunmap_first_data_page(cmd); + core_scsi3_put_pr_reg(dest_pr_reg); return 0; out: + if (buf) + transport_kunmap_first_data_page(cmd); if (dest_se_deve) core_scsi3_lunacl_undepend_item(dest_se_deve); if (dest_node_acl) @@ -3723,7 +3742,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) */ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) { - unsigned char *buf = (unsigned char *)cmd->t_task_buf; + unsigned char *buf; u64 res_key, sa_res_key; int sa, scope, type, aptpl; int spec_i_pt = 0, all_tg_pt = 0, unreg = 0; @@ -3745,6 +3764,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) sa = (cdb[1] & 0x1f); scope = (cdb[2] & 0xf0); type = (cdb[2] & 0x0f); + + buf = transport_kmap_first_data_page(cmd); /* * From PERSISTENT_RESERVE_OUT parameter list (payload) */ @@ -3762,6 +3783,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) aptpl = (buf[17] & 0x01); unreg = (buf[17] & 0x02); } + transport_kunmap_first_data_page(cmd); + buf = NULL; + /* * SPEC_I_PT=1 is only valid for Service action: REGISTER */ @@ -3830,7 +3854,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) struct se_device *se_dev = cmd->se_dev; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)cmd->t_task_buf; + unsigned char *buf; u32 add_len = 0, off = 8; if (cmd->data_length < 8) { @@ -3839,6 +3863,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) return PYX_TRANSPORT_INVALID_CDB_FIELD; } + buf = transport_kmap_first_data_page(cmd); buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); @@ -3872,6 +3897,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) buf[6] = ((add_len >> 8) & 0xff); buf[7] = (add_len & 0xff); + transport_kunmap_first_data_page(cmd); + return 0; } @@ -3885,7 +3912,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) struct se_device *se_dev = cmd->se_dev; struct se_subsystem_dev *su_dev = se_dev->se_sub_dev; struct t10_pr_registration *pr_reg; - unsigned char *buf = (unsigned char *)cmd->t_task_buf; + unsigned char *buf; u64 pr_res_key; u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ @@ -3895,6 +3922,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) return PYX_TRANSPORT_INVALID_CDB_FIELD; } + buf = transport_kmap_first_data_page(cmd); buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); @@ -3911,10 +3939,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) buf[6] = ((add_len >> 8) & 0xff); buf[7] = (add_len & 0xff); - if (cmd->data_length < 22) { - spin_unlock(&se_dev->dev_reservation_lock); - return 0; - } + if (cmd->data_length < 22) + goto err; + /* * Set the Reservation key. 
* @@ -3951,7 +3978,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) buf[21] = (pr_reg->pr_res_scope & 0xf0) | (pr_reg->pr_res_type & 0x0f); } + +err: spin_unlock(&se_dev->dev_reservation_lock); + transport_kunmap_first_data_page(cmd); return 0; } @@ -3965,7 +3995,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task_buf; + unsigned char *buf; u16 add_len = 8; /* Hardcoded to 8. */ if (cmd->data_length < 6) { @@ -3974,6 +4004,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) return PYX_TRANSPORT_INVALID_CDB_FIELD; } + buf = transport_kmap_first_data_page(cmd); + buf[0] = ((add_len << 8) & 0xff); buf[1] = (add_len & 0xff); buf[2] |= 0x10; /* CRH: Compatible Reservation Hanlding bit. */ @@ -4004,6 +4036,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + transport_kunmap_first_data_page(cmd); + return 0; } @@ -4020,7 +4054,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) struct se_portal_group *se_tpg; struct t10_pr_registration *pr_reg, *pr_reg_tmp; struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr; - unsigned char *buf = (unsigned char *)cmd->t_task_buf; + unsigned char *buf; u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len; u32 off = 8; /* off into first Full Status descriptor */ int format_code = 0; @@ -4031,6 +4065,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) return PYX_TRANSPORT_INVALID_CDB_FIELD; } + buf = transport_kmap_first_data_page(cmd); + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); @@ -4150,6 +4186,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) buf[6] = ((add_len >> 8) & 0xff); buf[7] = (add_len & 0xff); + transport_kunmap_first_data_page(cmd); + return 0; } diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 3574c520a5f4..d9569242e3dc 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -695,7 +695,7 @@ static int pscsi_transport_complete(struct se_task *task) if (task->task_se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { - unsigned char *buf = task->task_se_cmd->t_task_buf; + unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd); if (cdb[0] == MODE_SENSE_10) { if (!(buf[3] & 0x80)) @@ -704,6 +704,8 @@ static int pscsi_transport_complete(struct se_task *task) if (!(buf[2] & 0x80)) buf[2] |= 0x80; } + + transport_kunmap_first_data_page(task->task_se_cmd); } } after_mode_sense: @@ -1246,33 +1248,6 @@ static int pscsi_map_task_SG(struct se_task *task) return 0; } -/* pscsi_map_task_non_SG(): - * - * - */ -static int pscsi_map_task_non_SG(struct se_task *task) -{ - struct se_cmd *cmd = task->task_se_cmd; - struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - int ret = 0; - - if (pscsi_blk_get_request(task) < 0) - return PYX_TRANSPORT_LU_COMM_FAILURE; - - if (!task->task_size) - return 0; - - ret = blk_rq_map_kern(pdv->pdv_sd->request_queue, - pt->pscsi_req, cmd->t_task_buf, - task->task_size, GFP_KERNEL); - if (ret < 0) { - printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret); - return 
PYX_TRANSPORT_LU_COMM_FAILURE; - } - return 0; -} - static int pscsi_CDB_none(struct se_task *task) { return pscsi_blk_get_request(task); @@ -1392,7 +1367,6 @@ static struct se_subsystem_api pscsi_template = { .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, .cdb_none = pscsi_CDB_none, - .map_task_non_SG = pscsi_map_task_non_SG, .map_task_SG = pscsi_map_task_SG, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 056c4cb4736d..c681c812dd64 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -213,7 +213,7 @@ static u32 transport_allocate_tasks(struct se_cmd *cmd, unsigned long long starting_lba, u32 sectors, enum dma_data_direction data_direction, struct list_head *mem_list, int set_counts); -static int transport_generic_get_mem(struct se_cmd *cmd, u32 length); +static int transport_generic_get_mem(struct se_cmd *cmd); static int transport_generic_remove(struct se_cmd *cmd, int session_reinstatement); static int transport_cmd_get_valid_sectors(struct se_cmd *cmd); @@ -2233,23 +2233,6 @@ static void transport_generic_request_timeout(struct se_cmd *cmd) transport_generic_remove(cmd, 0); } -static int -transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length) -{ - unsigned char *buf; - - buf = kzalloc(data_length, GFP_KERNEL); - if (!(buf)) { - printk(KERN_ERR "Unable to allocate memory for buffer\n"); - return -ENOMEM; - } - - cmd->t_tasks_se_num = 0; - cmd->t_task_buf = buf; - - return 0; -} - static inline u32 transport_lba_21(unsigned char *cdb) { return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; @@ -2966,19 +2949,6 @@ static int transport_get_sense_data(struct se_cmd *cmd) return -1; } -static int transport_allocate_resources(struct se_cmd *cmd) -{ - u32 length = cmd->data_length; - - if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || - (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) - return transport_generic_get_mem(cmd, length); - else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) - return transport_generic_allocate_buf(cmd, length); - else - return 0; -} - static int transport_handle_reservation_conflict(struct se_cmd *cmd) { @@ -3265,7 +3235,7 @@ static int transport_generic_cmd_sequencer( /* GPCMD_SEND_KEY from multi media commands */ size = (cdb[8] << 8) + cdb[9]; } - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case MODE_SELECT: size = cdb[4]; @@ -3277,7 +3247,7 @@ static int transport_generic_cmd_sequencer( break; case MODE_SENSE: size = cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case MODE_SENSE_10: case GPCMD_READ_BUFFER_CAPACITY: @@ -3285,11 +3255,11 @@ static int transport_generic_cmd_sequencer( case LOG_SELECT: case LOG_SENSE: size = (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_BLOCK_LIMITS: size = READ_BLOCK_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case GPCMD_GET_CONFIGURATION: case GPCMD_READ_FORMAT_CAPACITIES: @@ -3305,7 +3275,7 @@ static int transport_generic_cmd_sequencer( SPC3_PERSISTENT_RESERVATIONS) ? 
core_scsi3_emulate_pr : NULL; size = (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case GPCMD_MECHANISM_STATUS: case GPCMD_READ_DVD_STRUCTURE: @@ -3314,7 +3284,7 @@ static int transport_generic_cmd_sequencer( break; case READ_POSITION: size = READ_POSITION_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case MAINTENANCE_OUT: if (dev->transport->get_device_type(dev) != TYPE_ROM) { @@ -3336,7 +3306,7 @@ static int transport_generic_cmd_sequencer( /* GPCMD_REPORT_KEY from multi media commands */ size = (cdb[8] << 8) + cdb[9]; } - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case INQUIRY: size = (cdb[3] << 8) + cdb[4]; @@ -3346,21 +3316,21 @@ static int transport_generic_cmd_sequencer( */ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_CAPACITY: size = READ_CAP_LEN; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_MEDIA_SERIAL_NUMBER: case SECURITY_PROTOCOL_IN: case SECURITY_PROTOCOL_OUT: size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case SERVICE_ACTION_IN: case ACCESS_CONTROL_IN: @@ -3371,36 +3341,36 @@ static int transport_generic_cmd_sequencer( case WRITE_ATTRIBUTE: size = (cdb[10] << 24) | (cdb[11] << 16) | (cdb[12] << 8) | cdb[13]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: size = (cdb[3] << 8) | cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; /* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. 
*/ #if 0 case GPCMD_READ_CD: sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; size = (2336 * sectors); - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; #endif case READ_TOC: size = cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case REQUEST_SENSE: size = cdb[4]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case READ_ELEMENT_STATUS: size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case WRITE_BUFFER: size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case RESERVE: case RESERVE_10: @@ -3480,7 +3450,7 @@ static int transport_generic_cmd_sequencer( break; case UNMAP: size = get_unaligned_be16(&cdb[7]); - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; case WRITE_SAME_16: sectors = transport_get_sectors_16(cdb, cmd, &sector_ret); @@ -3547,7 +3517,7 @@ static int transport_generic_cmd_sequencer( */ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) cmd->sam_task_attr = MSG_HEAD_TAG; - cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB; + cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; default: printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" @@ -3804,16 +3774,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) cmd->data_length; } spin_unlock(&cmd->se_lun->lun_sep_lock); - /* - * If enabled by TCM fabric module pre-registered SGL - * memory, perform the memcpy() from the TCM internal - * contiguous buffer back to the original SGL. - */ - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) - sg_copy_from_buffer(cmd->t_task_pt_sgl, - cmd->t_task_pt_sgl_num, - cmd->t_task_buf, - cmd->data_length); ret = cmd->se_tfo->queue_data_in(cmd); if (ret == -EAGAIN) @@ -3899,12 +3859,6 @@ static inline void transport_free_pages(struct se_cmd *cmd) if (cmd->se_dev->transport->do_se_mem_map) free_page = 0; - if (cmd->t_task_buf) { - kfree(cmd->t_task_buf); - cmd->t_task_buf = NULL; - return; - } - list_for_each_entry_safe(se_mem, se_mem_tmp, &cmd->t_mem_list, se_list) { /* @@ -4068,25 +4022,6 @@ int transport_generic_map_mem_to_cmd( cmd->t_tasks_se_bidi_num = ret; } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; - - } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { - if (sgl_bidi || sgl_bidi_count) { - printk(KERN_ERR "BIDI-Commands not supported using " - "SCF_SCSI_CONTROL_NONSG_IO_CDB\n"); - return -ENOSYS; - } - /* - * For incoming CDBs using a contiguous buffer internal with TCM, - * save the passed struct scatterlist memory. After TCM storage object - * processing has completed for this struct se_cmd, TCM core will call - * transport_memcpy_[write,read]_contig() as necessary from - * transport_generic_complete_ok() and transport_write_pending() in order - * to copy the TCM buffer to/from the original passed *mem in SGL -> - * struct scatterlist format.
- */ - cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG; - cmd->t_task_pt_sgl = sgl; - cmd->t_task_pt_sgl_num = sgl_count; } return 0; @@ -4184,10 +4119,41 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) return 0; } +void *transport_kmap_first_data_page(struct se_cmd *cmd) +{ + struct se_mem *se_mem; + + BUG_ON(list_empty(&cmd->t_mem_list)); + + se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list); + + /* + * 1st se_mem should point to a page, and we shouldn't need more than + * that for this cmd + */ + BUG_ON(cmd->data_length > PAGE_SIZE); + + return kmap(se_mem->se_page); +} +EXPORT_SYMBOL(transport_kmap_first_data_page); + +void transport_kunmap_first_data_page(struct se_cmd *cmd) +{ + struct se_mem *se_mem; + + BUG_ON(list_empty(&cmd->t_mem_list)); + + se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list); + + kunmap(se_mem->se_page); +} +EXPORT_SYMBOL(transport_kunmap_first_data_page); + static int -transport_generic_get_mem(struct se_cmd *cmd, u32 length) +transport_generic_get_mem(struct se_cmd *cmd) { struct se_mem *se_mem; + int length = cmd->data_length; /* * If the device uses memory mapping this is enough. @@ -4195,6 +4161,7 @@ transport_generic_get_mem(struct se_cmd *cmd, u32 length) if (cmd->se_dev->transport->do_se_mem_map) return 0; + /* Even cmds with length 0 will get here, btw */ while (length) { se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); if (!(se_mem)) { @@ -4851,10 +4818,6 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) if (dev->transport->map_task_SG) return dev->transport->map_task_SG(task); return 0; - } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) { - if (dev->transport->map_task_non_SG) - return dev->transport->map_task_non_SG(task); - return 0; } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { if (dev->transport->cdb_none) return dev->transport->cdb_none(task); @@ -4887,7 +4850,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd) * cmd->t_mem_list of struct se_mem->se_page */ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { - ret = transport_allocate_resources(cmd); + ret = transport_generic_get_mem(cmd); if (ret < 0) return ret; } @@ -5029,17 +4992,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd) cmd->transport_qf_callback = NULL; return 0; } - /* - * For the TCM control CDBs using a contiguous buffer, do the memcpy - * from the passed Linux/SCSI struct scatterlist located at - * se_cmd->t_task_pt_sgl to the contiguous buffer at - * se_cmd->t_task_buf. 
- */ - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG) - sg_copy_to_buffer(cmd->t_task_pt_sgl, - cmd->t_task_pt_sgl_num, - cmd->t_task_buf, - cmd->data_length); + /* * Clear the se_cmd for WRITE_PENDING status in order to set * cmd->t_transport_active=0 so that transport_generic_handle_data diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index a00951c80324..1017f56bbbcc 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -71,9 +71,9 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) caller, cmd, cmd->cdb); printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); - printk(KERN_INFO "%s: cmd %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n", + printk(KERN_INFO "%s: cmd %p se_num %u len %u se_cmd_flags <0x%x>\n", caller, cmd, se_cmd->t_tasks_se_num, - se_cmd->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags); + se_cmd->data_length, se_cmd->se_cmd_flags); list_for_each_entry(mem, &se_cmd->t_mem_list, se_list) printk(KERN_INFO "%s: cmd %p mem %p page %p " diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 8560182f0dad..837660728563 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -92,19 +92,15 @@ int ft_queue_data_in(struct se_cmd *se_cmd) remaining = se_cmd->data_length; /* - * Setup to use first mem list entry if any. + * Setup to use first mem list entry, unless no data. */ - if (se_cmd->t_tasks_se_num) { + BUG_ON(remaining && list_empty(&se_cmd->t_mem_list)); + if (remaining) { mem = list_first_entry(&se_cmd->t_mem_list, struct se_mem, se_list); mem_len = mem->se_len; mem_off = mem->se_off; page = mem->se_page; - } else { - mem = NULL; - mem_len = remaining; - mem_off = 0; - page = NULL; } /* no scatter/gather in skb for odd word length due to fc_seq_send() */ @@ -145,18 +141,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) tlen = min(mem_len, frame_len); if (use_sg) { - if (!mem) { - BUG_ON(!se_cmd->t_task_buf); - page_addr = se_cmd->t_task_buf + mem_off; - /* - * In this case, offset is 'offset_in_page' of - * (t_task_buf + mem_off) instead of 'mem_off'. - */ - off_in_page = offset_in_page(page_addr); - page = virt_to_page(page_addr); - tlen = min(tlen, PAGE_SIZE - off_in_page); - } else - off_in_page = mem_off; + off_in_page = mem_off; BUG_ON(!page); get_page(page); skb_fill_page_desc(fp_skb(fp), @@ -166,7 +151,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) fp_skb(fp)->data_len += tlen; fp_skb(fp)->truesize += PAGE_SIZE << compound_order(page); - } else if (mem) { + } else { BUG_ON(!page); from = kmap_atomic(page + (mem_off >> PAGE_SHIFT), KM_SOFTIRQ0); @@ -177,10 +162,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd) memcpy(to, from, tlen); kunmap_atomic(page_addr, KM_SOFTIRQ0); to += tlen; - } else { - from = se_cmd->t_task_buf + mem_off; - memcpy(to, from, tlen); - to += tlen; } mem_off += tlen; @@ -305,19 +286,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) frame_len = se_cmd->data_length - rel_off; /* - * Setup to use first mem list entry if any. + * Setup to use first mem list entry, unless no data. 
*/ - if (se_cmd->t_tasks_se_num) { + BUG_ON(frame_len && list_empty(&se_cmd->t_mem_list)); + if (frame_len) { mem = list_first_entry(&se_cmd->t_mem_list, struct se_mem, se_list); mem_len = mem->se_len; mem_off = mem->se_off; page = mem->se_page; - } else { - mem = NULL; - page = NULL; - mem_off = 0; - mem_len = frame_len; } while (frame_len) { @@ -340,19 +317,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) tlen = min(mem_len, frame_len); - if (mem) { - to = kmap_atomic(page + (mem_off >> PAGE_SHIFT), - KM_SOFTIRQ0); - page_addr = to; - to += mem_off & ~PAGE_MASK; - tlen = min(tlen, (size_t)(PAGE_SIZE - - (mem_off & ~PAGE_MASK))); - memcpy(to, from, tlen); - kunmap_atomic(page_addr, KM_SOFTIRQ0); - } else { - to = se_cmd->t_task_buf + mem_off; - memcpy(to, from, tlen); - } + to = kmap_atomic(page + (mem_off >> PAGE_SHIFT), + KM_SOFTIRQ0); + page_addr = to; + to += mem_off & ~PAGE_MASK; + tlen = min(tlen, (size_t)(PAGE_SIZE - + (mem_off & ~PAGE_MASK))); + memcpy(to, from, tlen); + kunmap_atomic(page_addr, KM_SOFTIRQ0); + from += tlen; frame_len -= tlen; mem_off += tlen; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 81deb399bf6a..f67ca98394d3 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -109,7 +109,6 @@ enum se_cmd_flags_table { SCF_EMULATED_TASK_SENSE = 0x00000004, SCF_SCSI_DATA_SG_IO_CDB = 0x00000008, SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010, - SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020, SCF_SCSI_NON_DATA_CDB = 0x00000040, SCF_SCSI_CDB_EXCEPTION = 0x00000080, SCF_SCSI_RESERVATION_CONFLICT = 0x00000100, @@ -123,7 +122,6 @@ enum se_cmd_flags_table { SCF_ALUA_NON_OPTIMIZED = 0x00040000, SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000, SCF_UNUSED = 0x00100000, - SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000, SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000, SCF_EMULATE_CDB_ASYNC = 0x01000000, SCF_EMULATE_QUEUE_FULL = 0x02000000, @@ -516,8 +514,7 @@ struct se_cmd { struct completion transport_lun_fe_stop_comp; struct completion transport_lun_stop_comp; struct scatterlist *t_tasks_sg_chained; - struct scatterlist t_tasks_sg_bounce; - void *t_task_buf; + /* * Used for pre-registered fabric SGL passthrough WRITE and READ * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index b27ce1af698b..123df9238c68 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -165,6 +165,8 @@ extern void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *, struct se_session *, u32, int, int, unsigned char *); +void *transport_kmap_first_data_page(struct se_cmd *cmd); +void transport_kunmap_first_data_page(struct se_cmd *cmd); extern void transport_free_se_cmd(struct se_cmd *); extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); extern int transport_generic_handle_cdb(struct se_cmd *); @@ -236,10 +238,6 @@ struct se_subsystem_api { * For SCF_SCSI_NON_DATA_CDB */ int (*cdb_none)(struct se_task *); - /* - * For SCF_SCSI_CONTROL_NONSG_IO_CDB - */ - int (*map_task_non_SG)(struct se_task *); /* * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB */ -- cgit v1.2.3 From d0229ae3fed59b4009e33f836d9ad4e312294d46 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Fri, 8 Jul 2011 17:04:53 -0700 Subject: target: Enforce 1 page max for control cdb buffer sizes Due to all cdbs' data buffers being referenced by scatterlists, buffers 
of more than a page are not contiguous. Instead of handling this in all control command handlers, we may be able to get away with just limiting control cdb data buffers to one page. The only control CDBs we handle that have potentially large data buffers are REPORT LUNS and UNMAP, so if we didn't want to live with this limitation, they would need to be modified to walk the pages in the data buffer's sgl. Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_transport.c | 5 +++++ include/target/target_core_base.h | 1 + 2 files changed, 6 insertions(+) (limited to 'include') diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c681c812dd64..267113c1607c 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3562,6 +3562,11 @@ static int transport_generic_cmd_sequencer( cmd->data_length = size; } + /* Let's limit control cdbs to a page, for simplicity's sake. */ + if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && + size > PAGE_SIZE) + goto out_invalid_cdb_field; + transport_set_supported_SAM_opcode(cmd); return ret; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index f67ca98394d3..bb9ef9f715a3 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -15,6 +15,7 @@ /* Used by transport_generic_allocate_iovecs() */ #define TRANSPORT_IOV_DATA_BUFFER 5 /* Maximum Number of LUNs per Target Portal Group */ +/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */ #define TRANSPORT_MAX_LUNS_PER_TPG 256 /* * By default we use 32-byte CDBs in TCM Core and subsystem plugin code. -- cgit v1.2.3 From 3a86720567fd92819b449df10db85a2f73447d87 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Tue, 28 Jun 2011 10:31:18 -0700 Subject: target: Pass 2nd param of transport_split_cdb by value Since sectors is not modified, it's more straightforward to do this. 
Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_scdb.c | 20 ++++++++++---------- drivers/target/target_core_scdb.h | 10 +++++----- drivers/target/target_core_transport.c | 3 +-- include/target/target_core_base.h | 2 +- 4 files changed, 17 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c index dc6fed037ab3..72843441d4fa 100644 --- a/drivers/target/target_core_scdb.c +++ b/drivers/target/target_core_scdb.c @@ -42,13 +42,13 @@ */ void split_cdb_XX_6( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { cdb[1] = (lba >> 16) & 0x1f; cdb[2] = (lba >> 8) & 0xff; cdb[3] = lba & 0xff; - cdb[4] = *sectors & 0xff; + cdb[4] = sectors & 0xff; } /* split_cdb_XX_10(): @@ -57,11 +57,11 @@ void split_cdb_XX_6( */ void split_cdb_XX_10( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be16(*sectors, &cdb[7]); + put_unaligned_be16(sectors, &cdb[7]); } /* split_cdb_XX_12(): @@ -70,11 +70,11 @@ void split_cdb_XX_10( */ void split_cdb_XX_12( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { put_unaligned_be32(lba, &cdb[2]); - put_unaligned_be32(*sectors, &cdb[6]); + put_unaligned_be32(sectors, &cdb[6]); } /* split_cdb_XX_16(): @@ -83,11 +83,11 @@ void split_cdb_XX_12( */ void split_cdb_XX_16( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { put_unaligned_be64(lba, &cdb[2]); - put_unaligned_be32(*sectors, &cdb[10]); + put_unaligned_be32(sectors, &cdb[10]); } /* @@ -97,9 +97,9 @@ void split_cdb_XX_16( */ void split_cdb_XX_32( unsigned long long lba, - u32 *sectors, + u32 sectors, unsigned char *cdb) { put_unaligned_be64(lba, &cdb[12]); - put_unaligned_be32(*sectors, &cdb[28]); + put_unaligned_be32(sectors, &cdb[28]); } diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h index 98cd1c01ed83..48e9ccc9585e 100644 --- a/drivers/target/target_core_scdb.h +++ b/drivers/target/target_core_scdb.h @@ -1,10 +1,10 @@ #ifndef TARGET_CORE_SCDB_H #define TARGET_CORE_SCDB_H -extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *); -extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *); -extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *); -extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *); -extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *); +extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *); +extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *); +extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *); +extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *); +extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *); #endif /* TARGET_CORE_SCDB_H */ diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 267113c1607c..b499d14f4637 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4728,8 +4728,7 @@ static u32 transport_allocate_tasks( scsi_command_size(cmd->t_task_cdb)); /* Update new cdb with updated lba/sectors */ - cmd->transport_split_cdb(task->task_lba, - &task->task_sectors, cdb); + cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); /* * Perform the SE OBJ plugin and/or Transport plugin specific diff --git 
a/include/target/target_core_base.h b/include/target/target_core_base.h index bb9ef9f715a3..d97618a2ee95 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -479,7 +479,7 @@ struct se_cmd { struct list_head se_queue_node; struct target_core_fabric_ops *se_tfo; int (*transport_emulate_cdb)(struct se_cmd *); - void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *); + void (*transport_split_cdb)(unsigned long long, u32, unsigned char *); void (*transport_wait_for_tasks)(struct se_cmd *, int, int); void (*transport_complete_callback)(struct se_cmd *); int (*transport_qf_callback)(struct se_cmd *); -- cgit v1.2.3 From ec98f7825c6eaa4a9afb0eb518826efc8a2ed4a2 Mon Sep 17 00:00:00 2001 From: Andy Grover Date: Wed, 20 Jul 2011 19:28:46 +0000 Subject: target: Eliminate usage of struct se_mem Both backstores and fabrics use arrays of struct scatterlist to describe data buffers. However, TCM used struct se_mems, basically a linked list of scatterlist entries. We are able to simplify the code by eliminating this intermediate data structure and just using struct scatterlist[] throughout. Also, moved attachment of task to cmd out of transport_generic_get_task and into allocate_control_task and allocate_data_tasks. The reasoning is that it's nonintuitive that get_task should automatically add it to the cmd's task list -- it should just return an allocated, initialized task. That's all it should do, based on the function's name, so either the function shouldn't do it, or the name should change to encapsulate the entire essence of what it does. (nab: Fix compile warnings in tcm_fc, and make transport_kmap_first_data_page honor sg->offset for SGLs from contiguous memory with TCM_Loop, and fix control se_cmd descriptor memory leak) Signed-off-by: Andy Grover Signed-off-by: Nicholas Bellinger --- drivers/target/loopback/tcm_loop.c | 5 +- drivers/target/target_core_iblock.c | 2 +- drivers/target/target_core_pscsi.c | 2 +- drivers/target/target_core_transport.c | 995 +++++++++------------------ drivers/target/tcm_fc/tfc_cmd.c | 25 +- drivers/target/tcm_fc/tfc_io.c | 58 +- include/target/target_core_base.h | 10 +- 7 files changed, 313 insertions(+), 784 deletions(-) (limited to 'include') diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index fe11a336b598..99603bc45786 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -175,10 +175,7 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd) sgl_bidi_count = sdb->table.nents; } - /* - * Map the SG memory into struct se_mem->page linked list using the same - * physical memory at sg->page_link. - */ + /* Tell the core about our preallocated memory */ ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc), scsi_sg_count(sc), sgl_bidi, sgl_bidi_count); if (ret < 0) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 392e75fb1087..164b72106b88 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -634,7 +634,7 @@ static int iblock_map_task_SG(struct se_task *task) hbio = tbio = bio; /* * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist - * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
*/ for_each_sg(task->task_sg, sg, task->task_sg_num, i) { DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index d9569242e3dc..318ef14fe37d 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1097,7 +1097,7 @@ static int __pscsi_map_task_SG( return 0; /* * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup - * the bio_vec maplist from TC< struct se_mem -> task->task_sg -> + * the bio_vec maplist from task->task_sg -> * struct scatterlist memory. The struct se_task->task_sg[] currently needs * to be attached to struct bios for submission to Linux/SCSI using * struct request to struct scsi_device->request_queue. diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index b499d14f4637..c743d94baf77 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -190,7 +190,6 @@ static struct kmem_cache *se_cmd_cache; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_tmr_req_cache; struct kmem_cache *se_ua_cache; -struct kmem_cache *se_mem_cache; struct kmem_cache *t10_pr_reg_cache; struct kmem_cache *t10_alua_lu_gp_cache; struct kmem_cache *t10_alua_lu_gp_mem_cache; @@ -210,17 +209,12 @@ static void transport_handle_queue_full(struct se_cmd *cmd, static void transport_direct_request_timeout(struct se_cmd *cmd); static void transport_free_dev_tasks(struct se_cmd *cmd); static u32 transport_allocate_tasks(struct se_cmd *cmd, - unsigned long long starting_lba, u32 sectors, + unsigned long long starting_lba, enum dma_data_direction data_direction, - struct list_head *mem_list, int set_counts); + struct scatterlist *sgl, unsigned int nents); static int transport_generic_get_mem(struct se_cmd *cmd); static int transport_generic_remove(struct se_cmd *cmd, int session_reinstatement); -static int transport_cmd_get_valid_sectors(struct se_cmd *cmd); -static int transport_map_sg_to_mem(struct se_cmd *cmd, - struct list_head *se_mem_list, struct scatterlist *sgl); -static void transport_memcpy_se_mem_read_contig(unsigned char *dst, - struct list_head *se_mem_list, u32 len); static void transport_release_fe_cmd(struct se_cmd *cmd); static void transport_remove_cmd_from_queue(struct se_cmd *cmd, struct se_queue_obj *qobj); @@ -258,12 +252,6 @@ int init_se_kmem_caches(void) printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); goto out; } - se_mem_cache = kmem_cache_create("se_mem_cache", - sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL); - if (!(se_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n"); - goto out; - } t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", sizeof(struct t10_pr_registration), __alignof__(struct t10_pr_registration), 0, NULL); @@ -317,8 +305,6 @@ out: kmem_cache_destroy(se_sess_cache); if (se_ua_cache) kmem_cache_destroy(se_ua_cache); - if (se_mem_cache) - kmem_cache_destroy(se_mem_cache); if (t10_pr_reg_cache) kmem_cache_destroy(t10_pr_reg_cache); if (t10_alua_lu_gp_cache) @@ -338,7 +324,6 @@ void release_se_kmem_caches(void) kmem_cache_destroy(se_tmr_req_cache); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); - kmem_cache_destroy(se_mem_cache); kmem_cache_destroy(t10_pr_reg_cache); kmem_cache_destroy(t10_alua_lu_gp_cache); kmem_cache_destroy(t10_alua_lu_gp_mem_cache); @@ -1702,7 +1687,6 @@ transport_generic_get_task(struct se_cmd *cmd, { struct se_task *task; 
struct se_device *dev = cmd->se_dev; - unsigned long flags; task = dev->transport->alloc_task(cmd); if (!task) { @@ -1718,10 +1702,6 @@ transport_generic_get_task(struct se_cmd *cmd, task->se_dev = dev; task->task_data_direction = data_direction; - spin_lock_irqsave(&cmd->t_state_lock, flags); - list_add_tail(&task->t_list, &cmd->t_task_list); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return task; } @@ -1745,8 +1725,6 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->se_ordered_node); INIT_LIST_HEAD(&cmd->se_qf_node); - INIT_LIST_HEAD(&cmd->t_mem_list); - INIT_LIST_HEAD(&cmd->t_mem_bidi_list); INIT_LIST_HEAD(&cmd->t_task_list); init_completion(&cmd->transport_lun_fe_stop_comp); init_completion(&cmd->transport_lun_stop_comp); @@ -2838,9 +2816,10 @@ EXPORT_SYMBOL(transport_asciihex_to_binaryhex); static void transport_xor_callback(struct se_cmd *cmd) { unsigned char *buf, *addr; - struct se_mem *se_mem; + struct scatterlist *sg; unsigned int offset; int i; + int count; /* * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command * @@ -2858,28 +2837,32 @@ static void transport_xor_callback(struct se_cmd *cmd) return; } /* - * Copy the scatterlist WRITE buffer located at cmd->t_mem_list + * Copy the scatterlist WRITE buffer located at cmd->t_data_sg * into the locally allocated *buf */ - transport_memcpy_se_mem_read_contig(buf, &cmd->t_mem_list, - cmd->data_length); + sg_copy_to_buffer(cmd->t_data_sg, + cmd->t_data_nents, + buf, + cmd->data_length); + /* * Now perform the XOR against the BIDI read memory located at * cmd->t_mem_bidi_list */ offset = 0; - list_for_each_entry(se_mem, &cmd->t_mem_bidi_list, se_list) { - addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0); - if (!(addr)) + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { + addr = kmap_atomic(sg_page(sg), KM_USER0); + if (!addr) goto out; - for (i = 0; i < se_mem->se_len; i++) - *(addr + se_mem->se_off + i) ^= *(buf + offset + i); + for (i = 0; i < sg->length; i++) + *(addr + sg->offset + i) ^= *(buf + offset + i); - offset += se_mem->se_len; + offset += sg->length; kunmap_atomic(addr, KM_USER0); } + out: kfree(buf); } @@ -2971,6 +2954,35 @@ transport_handle_reservation_conflict(struct se_cmd *cmd) return -EINVAL; } +static inline long long transport_dev_end_lba(struct se_device *dev) +{ + return dev->transport->get_blocks(dev) + 1; +} + +static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) +{ + struct se_device *dev = cmd->se_dev; + u32 sectors; + + if (dev->transport->get_device_type(dev) != TYPE_DISK) + return 0; + + sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); + + if ((cmd->t_task_lba + sectors) > + transport_dev_end_lba(dev)) { + printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" + " transport_dev_end_lba(): %llu\n", + cmd->t_task_lba, sectors, + transport_dev_end_lba(dev)); + printk(KERN_ERR " We should return CHECK_CONDITION" + " but we don't yet\n"); + return 0; + } + + return sectors; +} + /* transport_generic_cmd_sequencer(): * * Generic Command Sequencer that should work for most DAS transport @@ -3580,28 +3592,6 @@ out_invalid_cdb_field: return -EINVAL; } -static inline void transport_release_tasks(struct se_cmd *); - -static void transport_memcpy_se_mem_read_contig( - unsigned char *dst, - struct list_head *se_mem_list, - u32 tot_len) -{ - struct se_mem *se_mem; - void *src; - u32 length; - - list_for_each_entry(se_mem, se_mem_list, se_list) { - length = min_t(u32, se_mem->se_len, tot_len); - src = page_address(se_mem->se_page) + 
se_mem->se_off; - memcpy(dst, src, length); - tot_len -= length; - if (!tot_len) - break; - dst += length; - } -} - /* * Called from transport_generic_complete_ok() and * transport_generic_request_failure() to determine which dormant/delayed @@ -3684,7 +3674,7 @@ static int transport_complete_qf(struct se_cmd *cmd) ret = cmd->se_tfo->queue_data_in(cmd); break; case DMA_TO_DEVICE: - if (!list_empty(&cmd->t_mem_bidi_list)) { + if (cmd->t_bidi_data_sg) { ret = cmd->se_tfo->queue_data_in(cmd); if (ret < 0) return ret; @@ -3794,7 +3784,7 @@ static void transport_generic_complete_ok(struct se_cmd *cmd) /* * Check if we need to send READ payload for BIDI-COMMAND */ - if (!list_empty(&cmd->t_mem_bidi_list)) { + if (cmd->t_bidi_data_sg) { spin_lock(&cmd->se_lun->lun_sep_lock); if (cmd->se_lun->lun_sep) { cmd->se_lun->lun_sep->sep_stats.tx_data_octets += @@ -3856,41 +3846,42 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) static inline void transport_free_pages(struct se_cmd *cmd) { - struct se_mem *se_mem, *se_mem_tmp; + struct scatterlist *sg; int free_page = 1; + int count; if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) free_page = 0; if (cmd->se_dev->transport->do_se_mem_map) free_page = 0; - list_for_each_entry_safe(se_mem, se_mem_tmp, - &cmd->t_mem_list, se_list) { + for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) { /* - * We only release call __free_page(struct se_mem->se_page) when + * Only called if * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, */ if (free_page) - __free_page(se_mem->se_page); + __free_page(sg_page(sg)); - list_del(&se_mem->se_list); - kmem_cache_free(se_mem_cache, se_mem); } - cmd->t_tasks_se_num = 0; + if (free_page) + kfree(cmd->t_data_sg); + cmd->t_data_sg = NULL; + cmd->t_data_nents = 0; - list_for_each_entry_safe(se_mem, se_mem_tmp, - &cmd->t_mem_bidi_list, se_list) { + for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { /* - * We only release call __free_page(struct se_mem->se_page) when + * Only called if * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, */ if (free_page) - __free_page(se_mem->se_page); + __free_page(sg_page(sg)); - list_del(&se_mem->se_list); - kmem_cache_free(se_mem_cache, se_mem); } - cmd->t_tasks_se_bidi_num = 0; + if (free_page) + kfree(cmd->t_bidi_data_sg); + cmd->t_bidi_data_sg = NULL; + cmd->t_bidi_data_nents = 0; } static inline void transport_release_tasks(struct se_cmd *cmd) @@ -3979,7 +3970,8 @@ free_pages: } /* - * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map + * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of + * allocating in the core. * @cmd: Associated se_cmd descriptor * @mem: SGL style memory for TCM WRITE / READ * @sg_mem_num: Number of SGL elements @@ -3996,35 +3988,18 @@ int transport_generic_map_mem_to_cmd( struct scatterlist *sgl_bidi, u32 sgl_bidi_count) { - int ret; - if (!sgl || !sgl_count) return 0; - /* - * Convert sgls (sgl, sgl_bidi) to list of se_mems - */ if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) || (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) { - /* - * For CDB using TCM struct se_mem linked list scatterlist memory - * processed into a TCM struct se_subsystem_dev, we do the mapping - * from the passed physical memory to struct se_mem->se_page here. 
- */ - ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_list, sgl); - if (ret < 0) - return -ENOMEM; - cmd->t_tasks_se_num = ret; - /* - * Setup BIDI READ list of struct se_mem elements - */ - if (sgl_bidi && sgl_bidi_count) { - ret = transport_map_sg_to_mem(cmd, &cmd->t_mem_bidi_list, sgl_bidi); - if (ret < 0) - return -ENOMEM; + cmd->t_data_sg = sgl; + cmd->t_data_nents = sgl_count; - cmd->t_tasks_se_bidi_num = ret; + if (sgl_bidi && sgl_bidi_count) { + cmd->t_bidi_data_sg = sgl_bidi; + cmd->t_bidi_data_nents = sgl_bidi_count; } cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; } @@ -4033,91 +4008,58 @@ int transport_generic_map_mem_to_cmd( } EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); - -static inline long long transport_dev_end_lba(struct se_device *dev) -{ - return dev->transport->get_blocks(dev) + 1; -} - -static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - u32 sectors; - - if (dev->transport->get_device_type(dev) != TYPE_DISK) - return 0; - - sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); - - if ((cmd->t_task_lba + sectors) > - transport_dev_end_lba(dev)) { - printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" - " transport_dev_end_lba(): %llu\n", - cmd->t_task_lba, sectors, - transport_dev_end_lba(dev)); - return 0; - } - - return sectors; -} - static int transport_new_cmd_obj(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; u32 task_cdbs; u32 rc; + int set_counts = 1; - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) { - task_cdbs = 1; - cmd->t_task_list_num = 1; - } else { - int set_counts = 1; - - /* - * Setup any BIDI READ tasks and memory from - * cmd->t_mem_bidi_list so the READ struct se_tasks - * are queued first for the non pSCSI passthrough case. - */ - if (!list_empty(&cmd->t_mem_bidi_list) && - (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { - rc = transport_allocate_tasks(cmd, - cmd->t_task_lba, - transport_cmd_get_valid_sectors(cmd), - DMA_FROM_DEVICE, &cmd->t_mem_bidi_list, - set_counts); - if (!(rc)) { - cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - return PYX_TRANSPORT_LU_COMM_FAILURE; - } - set_counts = 0; - } - /* - * Setup the tasks and memory from cmd->t_mem_list - * Note for BIDI transfers this will contain the WRITE payload - */ - task_cdbs = transport_allocate_tasks(cmd, - cmd->t_task_lba, - transport_cmd_get_valid_sectors(cmd), - cmd->data_direction, &cmd->t_mem_list, - set_counts); - if (!(task_cdbs)) { + /* + * Setup any BIDI READ tasks and memory from + * cmd->t_mem_bidi_list so the READ struct se_tasks + * are queued first for the non pSCSI passthrough case. 
+ */ + if (cmd->t_bidi_data_sg && + (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { + rc = transport_allocate_tasks(cmd, + cmd->t_task_lba, + DMA_FROM_DEVICE, + cmd->t_bidi_data_sg, + cmd->t_bidi_data_nents); + if (!rc) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = - TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return PYX_TRANSPORT_LU_COMM_FAILURE; } - cmd->t_task_list_num = task_cdbs; + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); + set_counts = 0; + } + /* + * Setup the tasks and memory from cmd->t_mem_list + * Note for BIDI transfers this will contain the WRITE payload + */ + task_cdbs = transport_allocate_tasks(cmd, + cmd->t_task_lba, + cmd->data_direction, + cmd->t_data_sg, + cmd->t_data_nents); + if (!task_cdbs) { + cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; + cmd->scsi_sense_reason = + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return PYX_TRANSPORT_LU_COMM_FAILURE; + } -#if 0 - printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:" - " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length, - cmd->t_task_lba, cmd->t_tasks_sectors, - cmd->t_task_cdbs); -#endif + if (set_counts) { + atomic_inc(&cmd->t_fe_count); + atomic_inc(&cmd->t_se_count); } + cmd->t_task_list_num = task_cdbs; + atomic_set(&cmd->t_task_cdbs_left, task_cdbs); atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); @@ -4126,39 +4068,31 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) void *transport_kmap_first_data_page(struct se_cmd *cmd) { - struct se_mem *se_mem; - - BUG_ON(list_empty(&cmd->t_mem_list)); - - se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list); + struct scatterlist *sg = cmd->t_data_sg; + BUG_ON(!sg); /* - * 1st se_mem should point to a page, and we shouldn't need more than - * that for this cmd + * We need to take into account a possible offset here for fabrics like + * tcm_loop who may be using a contig buffer from the SCSI midlayer for + * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() */ - BUG_ON(cmd->data_length > PAGE_SIZE); - - return kmap(se_mem->se_page); + return kmap(sg_page(sg)) + sg->offset; } EXPORT_SYMBOL(transport_kmap_first_data_page); void transport_kunmap_first_data_page(struct se_cmd *cmd) { - struct se_mem *se_mem; - - BUG_ON(list_empty(&cmd->t_mem_list)); - - se_mem = list_first_entry(&cmd->t_mem_list, struct se_mem, se_list); - - kunmap(se_mem->se_page); + kunmap(sg_page(cmd->t_data_sg)); } EXPORT_SYMBOL(transport_kunmap_first_data_page); static int transport_generic_get_mem(struct se_cmd *cmd) { - struct se_mem *se_mem; - int length = cmd->data_length; + u32 length = cmd->data_length; + unsigned int nents; + struct page *page; + int i = 0; /* * If the device uses memory mapping this is enough. 
@@ -4166,161 +4100,34 @@ transport_generic_get_mem(struct se_cmd *cmd) if (cmd->se_dev->transport->do_se_mem_map) return 0; - /* Even cmds with length 0 will get here, btw */ - while (length) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - goto out; - } - -/* #warning FIXME Allocate contigous pages for struct se_mem elements */ - se_mem->se_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); - if (!(se_mem->se_page)) { - printk(KERN_ERR "alloc_pages() failed\n"); - goto out; - } + nents = DIV_ROUND_UP(length, PAGE_SIZE); + cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); + if (!cmd->t_data_sg) + return -ENOMEM; - INIT_LIST_HEAD(&se_mem->se_list); - se_mem->se_len = min_t(u32, length, PAGE_SIZE); - list_add_tail(&se_mem->se_list, &cmd->t_mem_list); - cmd->t_tasks_se_num++; + cmd->t_data_nents = nents; + sg_init_table(cmd->t_data_sg, nents); - DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)" - " Offset(%u)\n", se_mem->se_page, se_mem->se_len, - se_mem->se_off); + while (length) { + u32 page_len = min_t(u32, length, PAGE_SIZE); + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto out; - length -= se_mem->se_len; + sg_set_page(&cmd->t_data_sg[i], page, page_len, 0); + length -= page_len; + i++; } - - DEBUG_MEM("Allocated total struct se_mem elements(%u)\n", - cmd->t_tasks_se_num); - return 0; -out: - if (se_mem) - __free_pages(se_mem->se_page, 0); - kmem_cache_free(se_mem_cache, se_mem); - return -ENOMEM; -} - -int transport_init_task_sg( - struct se_task *task, - struct se_mem *in_se_mem, - u32 task_offset) -{ - struct se_cmd *se_cmd = task->task_se_cmd; - struct se_device *se_dev = se_cmd->se_dev; - struct se_mem *se_mem = in_se_mem; - struct target_core_fabric_ops *tfo = se_cmd->se_tfo; - u32 sg_length, task_size = task->task_size, task_sg_num_padded; - - while (task_size != 0) { - DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)" - " se_mem->se_off(%u) task_offset(%u)\n", - se_mem->se_page, se_mem->se_len, - se_mem->se_off, task_offset); - - if (task_offset == 0) { - if (task_size >= se_mem->se_len) { - sg_length = se_mem->se_len; - - if (!(list_is_last(&se_mem->se_list, - &se_cmd->t_mem_list))) - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - } else { - sg_length = task_size; - task_size -= sg_length; - goto next; - } - - DEBUG_SC("sg_length(%u) task_size(%u)\n", - sg_length, task_size); - } else { - if ((se_mem->se_len - task_offset) > task_size) { - sg_length = task_size; - task_size -= sg_length; - goto next; - } else { - sg_length = (se_mem->se_len - task_offset); - - if (!(list_is_last(&se_mem->se_list, - &se_cmd->t_mem_list))) - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - } - DEBUG_SC("sg_length(%u) task_size(%u)\n", - sg_length, task_size); - - task_offset = 0; - } - task_size -= sg_length; -next: - DEBUG_SC("task[%u] - Reducing task_size to(%u)\n", - task->task_no, task_size); - - task->task_sg_num++; - } - /* - * Check if the fabric module driver is requesting that all - * struct se_task->task_sg[] be chained together.. If so, - * then allocate an extra padding SG entry for linking and - * marking the end of the chained SGL. 
- */ - if (tfo->task_sg_chaining) { - task_sg_num_padded = (task->task_sg_num + 1); - task->task_padded_sg = 1; - } else - task_sg_num_padded = task->task_sg_num; - - task->task_sg = kzalloc(task_sg_num_padded * - sizeof(struct scatterlist), GFP_KERNEL); - if (!(task->task_sg)) { - printk(KERN_ERR "Unable to allocate memory for" - " task->task_sg\n"); - return -ENOMEM; - } - sg_init_table(&task->task_sg[0], task_sg_num_padded); - /* - * Setup task->task_sg_bidi for SCSI READ payload for - * TCM/pSCSI passthrough if present for BIDI-COMMAND - */ - if (!list_empty(&se_cmd->t_mem_bidi_list) && - (se_dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) { - task->task_sg_bidi = kzalloc(task_sg_num_padded * - sizeof(struct scatterlist), GFP_KERNEL); - if (!(task->task_sg_bidi)) { - kfree(task->task_sg); - task->task_sg = NULL; - printk(KERN_ERR "Unable to allocate memory for" - " task->task_sg_bidi\n"); - return -ENOMEM; - } - sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded); - } - /* - * For the chaining case, setup the proper end of SGL for the - * initial submission struct task into struct se_subsystem_api. - * This will be cleared later by transport_do_task_sg_chain() - */ - if (task->task_padded_sg) { - sg_mark_end(&task->task_sg[task->task_sg_num - 1]); - /* - * Added the 'if' check before marking end of bi-directional - * scatterlist (which gets created only in case of request - * (RD + WR). - */ - if (task->task_sg_bidi) - sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]); +out: + while (i >= 0) { + __free_page(sg_page(&cmd->t_data_sg[i])); + i--; } - - DEBUG_SC("Successfully allocated task->task_sg_num(%u)," - " task_sg_num_padded(%u)\n", task->task_sg_num, - task_sg_num_padded); - - return task->task_sg_num; + kfree(cmd->t_data_sg); + cmd->t_data_sg = NULL; + return -ENOMEM; } /* Reduce sectors if they are too long for the device */ @@ -4338,165 +4145,6 @@ static inline sector_t transport_limit_task_sectors( return sectors; } -/* - * Convert a sgl into a linked list of se_mems. - */ -static int transport_map_sg_to_mem( - struct se_cmd *cmd, - struct list_head *se_mem_list, - struct scatterlist *sg) -{ - struct se_mem *se_mem; - u32 cmd_size = cmd->data_length; - int sg_count = 0; - - WARN_ON(!sg); - - while (cmd_size) { - /* - * NOTE: it is safe to return -ENOMEM at any time in creating this - * list because transport_free_pages() will eventually be called, and is - * smart enough to deallocate all list items for sg and sg_bidi lists. 
- */ - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&se_mem->se_list); - DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u" - " sg_page: %p offset: %d length: %d\n", cmd_size, - sg_page(sg), sg->offset, sg->length); - - se_mem->se_page = sg_page(sg); - se_mem->se_off = sg->offset; - - if (cmd_size > sg->length) { - se_mem->se_len = sg->length; - sg = sg_next(sg); - } else - se_mem->se_len = cmd_size; - - cmd_size -= se_mem->se_len; - sg_count++; - - DEBUG_MEM("sg_to_mem: sg_count: %u cmd_size: %u\n", - sg_count, cmd_size); - DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n", - se_mem->se_page, se_mem->se_off, se_mem->se_len); - - list_add_tail(&se_mem->se_list, se_mem_list); - } - - DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments\n", sg_count); - - return sg_count; -} - -/* transport_map_mem_to_sg(): - * - * - */ -int transport_map_mem_to_sg( - struct se_task *task, - struct list_head *se_mem_list, - struct scatterlist *sg, - struct se_mem *in_se_mem, - struct se_mem **out_se_mem, - u32 *se_mem_cnt, - u32 *task_offset) -{ - struct se_cmd *se_cmd = task->task_se_cmd; - struct se_mem *se_mem = in_se_mem; - u32 task_size = task->task_size, sg_no = 0; - - if (!sg) { - printk(KERN_ERR "Unable to locate valid struct" - " scatterlist pointer\n"); - return -EINVAL; - } - - while (task_size != 0) { - /* - * Setup the contiguous array of scatterlists for - * this struct se_task. - */ - sg_assign_page(sg, se_mem->se_page); - - if (*task_offset == 0) { - sg->offset = se_mem->se_off; - - if (task_size >= se_mem->se_len) { - sg->length = se_mem->se_len; - - if (!(list_is_last(&se_mem->se_list, - &se_cmd->t_mem_list))) { - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - (*se_mem_cnt)++; - } - } else { - sg->length = task_size; - /* - * Determine if we need to calculate an offset - * into the struct se_mem on the next go around.. - */ - task_size -= sg->length; - if (!(task_size)) - *task_offset = sg->length; - - goto next; - } - - } else { - sg->offset = (*task_offset + se_mem->se_off); - - if ((se_mem->se_len - *task_offset) > task_size) { - sg->length = task_size; - /* - * Determine if we need to calculate an offset - * into the struct se_mem on the next go around.. 
- */ - task_size -= sg->length; - if (!(task_size)) - *task_offset += sg->length; - - goto next; - } else { - sg->length = (se_mem->se_len - *task_offset); - - if (!(list_is_last(&se_mem->se_list, - &se_cmd->t_mem_list))) { - se_mem = list_entry(se_mem->se_list.next, - struct se_mem, se_list); - (*se_mem_cnt)++; - } - } - - *task_offset = 0; - } - task_size -= sg->length; -next: - DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing" - " task_size to(%u), task_offset: %u\n", task->task_no, sg_no, - sg_page(sg), sg->length, sg->offset, task_size, *task_offset); - - sg_no++; - if (!(task_size)) - break; - - sg = sg_next(sg); - - if (task_size > se_cmd->data_length) - BUG(); - } - *out_se_mem = se_mem; - - DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)" - " SGs\n", task->task_no, *se_mem_cnt, sg_no); - - return 0; -} /* * This function can be used by HW target mode drivers to create a linked @@ -4506,81 +4154,43 @@ next: */ void transport_do_task_sg_chain(struct se_cmd *cmd) { - struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL; - struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL; - struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL; + struct scatterlist *sg_first = NULL; + struct scatterlist *sg_prev = NULL; + int sg_prev_nents = 0; + struct scatterlist *sg; struct se_task *task; - struct target_core_fabric_ops *tfo = cmd->se_tfo; - u32 task_sg_num = 0, sg_count = 0; + u32 chained_nents = 0; int i; - if (tfo->task_sg_chaining == 0) { - printk(KERN_ERR "task_sg_chaining is diabled for fabric module:" - " %s\n", tfo->get_fabric_name()); - dump_stack(); - return; - } + BUG_ON(!cmd->se_tfo->task_sg_chaining); + /* * Walk the struct se_task list and setup scatterlist chains * for each contiguously allocated struct se_task->task_sg[]. */ list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (!(task->task_sg) || !(task->task_padded_sg)) + if (!task->task_sg) continue; - if (sg_head && sg_link) { - sg_head_cur = &task->task_sg[0]; - sg_link_cur = &task->task_sg[task->task_sg_num]; - /* - * Either add chain or mark end of scatterlist - */ - if (!(list_is_last(&task->t_list, - &cmd->t_task_list))) { - /* - * Clear existing SGL termination bit set in - * transport_init_task_sg(), see sg_mark_end() - */ - sg_end_cur = &task->task_sg[task->task_sg_num - 1]; - sg_end_cur->page_link &= ~0x02; - - sg_chain(sg_head, task_sg_num, sg_head_cur); - sg_count += task->task_sg_num; - task_sg_num = (task->task_sg_num + 1); - } else { - sg_chain(sg_head, task_sg_num, sg_head_cur); - sg_count += task->task_sg_num; - task_sg_num = task->task_sg_num; - } + BUG_ON(!task->task_padded_sg); - sg_head = sg_head_cur; - sg_link = sg_link_cur; - continue; - } - sg_head = sg_first = &task->task_sg[0]; - sg_link = &task->task_sg[task->task_sg_num]; - /* - * Check for single task.. 
- */ - if (!(list_is_last(&task->t_list, &cmd->t_task_list))) { - /* - * Clear existing SGL termination bit set in - * transport_init_task_sg(), see sg_mark_end() - */ - sg_end = &task->task_sg[task->task_sg_num - 1]; - sg_end->page_link &= ~0x02; - sg_count += task->task_sg_num; - task_sg_num = (task->task_sg_num + 1); + if (!sg_first) { + sg_first = task->task_sg; + chained_nents = task->task_sg_num; } else { - sg_count += task->task_sg_num; - task_sg_num = task->task_sg_num; + sg_chain(sg_prev, sg_prev_nents, task->task_sg); + chained_nents += task->task_sg_num; } + + sg_prev = task->task_sg; + sg_prev_nents = task->task_sg_num; } /* * Setup the starting pointer and total t_tasks_sg_linked_no including * padding SGs for linking and to mark the end. */ cmd->t_tasks_sg_chained = sg_first; - cmd->t_tasks_sg_chained_no = sg_count; + cmd->t_tasks_sg_chained_no = chained_nents; DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, @@ -4599,129 +4209,46 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) } EXPORT_SYMBOL(transport_do_task_sg_chain); -static int transport_do_se_mem_map( - struct se_device *dev, - struct se_task *task, - struct list_head *se_mem_list, - void *in_mem, - struct se_mem *in_se_mem, - struct se_mem **out_se_mem, - u32 *se_mem_cnt, - u32 *task_offset_in) -{ - u32 task_offset = *task_offset_in; - int ret = 0; - /* - * se_subsystem_api_t->do_se_mem_map is used when internal allocation - * has been done by the transport plugin. - */ - if (dev->transport->do_se_mem_map) { - ret = dev->transport->do_se_mem_map(task, se_mem_list, - in_mem, in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); - if (ret == 0) - task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; - - return ret; - } - - BUG_ON(list_empty(se_mem_list)); - /* - * This is the normal path for all normal non BIDI and BIDI-COMMAND - * WRITE payloads.. If we need to do BIDI READ passthrough for - * TCM/pSCSI the first call to transport_do_se_mem_map -> - * transport_init_task_sg() -> transport_map_mem_to_sg() will do the - * allocation for task->task_sg_bidi, and the subsequent call to - * transport_do_se_mem_map() from transport_generic_get_cdb_count() - */ - if (!(task->task_sg_bidi)) { - /* - * Assume default that transport plugin speaks preallocated - * scatterlists. - */ - ret = transport_init_task_sg(task, in_se_mem, task_offset); - if (ret <= 0) - return ret; - /* - * struct se_task->task_sg now contains the struct scatterlist array. 
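
/*
 * Editorial sketch (not part of the patch): the rewritten
 * transport_do_task_sg_chain() above keeps just three pieces of state,
 * the first table, the previous table, and the previous table's entry
 * count, and links each per-task table onto its predecessor with
 * sg_chain().  Because task->task_sg_num already includes the padding
 * entry, sg_chain()'s use of the last slot lands exactly on that
 * padding entry.  The same walk in miniature; toy_sg and
 * toy_chain_tables() are invented, and the padding slot is modeled as
 * an explicit 'chain' pointer instead of the real page_link encoding.
 */
#include <stddef.h>

struct toy_sg {
	void		*page;
	struct toy_sg	*chain;	/* used only in a table's padding slot */
};

/* tables[i] holds nents[i] payload entries plus one padding slot. */
static struct toy_sg *toy_chain_tables(struct toy_sg **tables,
				       const int *nents, int ntables)
{
	struct toy_sg *first = NULL, *prev = NULL;
	int prev_nents = 0;
	int i;

	for (i = 0; i < ntables; i++) {
		if (!first)
			first = tables[i];
		else			/* sg_chain(prev, prev_nents, ...) */
			prev[prev_nents].chain = tables[i];
		prev = tables[i];
		prev_nents = nents[i];
	}
	return first;	/* what cmd->t_tasks_sg_chained points at */
}
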
- */ - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); - } - /* - * Handle the se_mem_list -> struct task->task_sg_bidi - * memory map for the extra BIDI READ payload - */ - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi, - in_se_mem, out_se_mem, se_mem_cnt, - task_offset_in); -} - /* * Break up cmd into chunks transport can handle */ -static u32 transport_allocate_tasks( +static int transport_allocate_data_tasks( struct se_cmd *cmd, unsigned long long lba, - u32 sectors, enum dma_data_direction data_direction, - struct list_head *mem_list, - int set_counts) + struct scatterlist *sgl, + unsigned int sgl_nents) { unsigned char *cdb = NULL; struct se_task *task; - struct se_mem *se_mem = NULL; - struct se_mem *se_mem_lout = NULL; - struct se_mem *se_mem_bidi = NULL; - struct se_mem *se_mem_bidi_lout = NULL; struct se_device *dev = cmd->se_dev; - int ret; - u32 task_offset_in = 0; - u32 se_mem_cnt = 0; - u32 se_mem_bidi_cnt = 0; - u32 task_cdbs = 0; - - BUG_ON(!mem_list); - /* - * While using RAMDISK_DR backstores is the only case where - * mem_list will ever be empty at this point. - */ - if (!(list_empty(mem_list))) - se_mem = list_first_entry(mem_list, struct se_mem, se_list); - /* - * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to - * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation - */ - if (!list_empty(&cmd->t_mem_bidi_list) && - (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) - se_mem_bidi = list_first_entry(&cmd->t_mem_bidi_list, - struct se_mem, se_list); + unsigned long flags; + sector_t sectors; + int task_count; + int i; + sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; + u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; + struct scatterlist *sg; + struct scatterlist *cmd_sg; - while (sectors) { - sector_t limited_sectors; + WARN_ON(cmd->data_length % sector_size); + sectors = DIV_ROUND_UP(cmd->data_length, sector_size); + task_count = DIV_ROUND_UP(sectors, dev_max_sectors); - DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n", - cmd->se_tfo->get_task_tag(cmd), lba, sectors, - transport_dev_end_lba(dev)); - - limited_sectors = transport_limit_task_sectors(dev, lba, sectors); - if (!limited_sectors) - break; + cmd_sg = sgl; + for (i = 0; i < task_count; i++) { + unsigned int task_size; + int count; task = transport_generic_get_task(cmd, data_direction); if (!task) - goto out; + return -ENOMEM; task->task_lba = lba; - task->task_sectors = limited_sectors; - lba += task->task_sectors; - sectors -= task->task_sectors; - task->task_size = (task->task_sectors * - dev->se_sub_dev->se_dev_attrib.block_size); + task->task_sectors = min(sectors, dev_max_sectors); + task->task_size = task->task_sectors * sector_size; cdb = dev->transport->get_cdb(task); - /* Should be part of task, can't fail */ BUG_ON(!cdb); memcpy(cdb, cmd->t_task_cdb, @@ -4731,94 +4258,86 @@ static u32 transport_allocate_tasks( cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); /* - * Perform the SE OBJ plugin and/or Transport plugin specific - * mapping for cmd->t_mem_list. And setup the - * task->task_sg and if necessary task->task_sg_bidi + * Check if the fabric module driver is requesting that all + * struct se_task->task_sg[] be chained together.. If so, + * then allocate an extra padding SG entry for linking and + * marking the end of the chained SGL. + * Possibly over-allocate task sgl size by using cmd sgl size. 
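
/*
 * Editorial sketch (not part of the patch): a worked example of the
 * splitting arithmetic in transport_allocate_data_tasks() above, which
 * derives the task count from DIV_ROUND_UP on the transfer length and
 * the per-task sector cap.  The block size and max_sectors values here
 * are illustrative, not taken from the patch; compiles and runs
 * standalone.
 */
#include <stdio.h>

#define SK_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long lba = 2048;		/* starting LBA */
	unsigned int data_length = 1 << 20;	/* 1 MiB transfer */
	unsigned int block_size = 512;
	unsigned int max_sectors = 1024;	/* per-task device limit */
	unsigned int sectors = SK_DIV_ROUND_UP(data_length, block_size);
	unsigned int task_count = SK_DIV_ROUND_UP(sectors, max_sectors);
	unsigned int i;

	/* 2048 sectors at 1024 per task: two tasks of 512 KiB each */
	for (i = 0; i < task_count; i++) {
		unsigned int task_sectors =
			(sectors < max_sectors) ? sectors : max_sectors;

		printf("task %u: lba=%llu sectors=%u size=%u\n",
		       i, lba, task_sectors, task_sectors * block_size);
		lba += task_sectors;
		sectors -= task_sectors;
	}
	return 0;
}
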
+ * It's so much easier and only a waste when task_count > 1. + * That is extremely rare. */ - ret = transport_do_se_mem_map(dev, task, mem_list, - NULL, se_mem, &se_mem_lout, &se_mem_cnt, - &task_offset_in); - if (ret < 0) - goto out; + task->task_sg_num = sgl_nents; + if (cmd->se_tfo->task_sg_chaining) { + task->task_sg_num++; + task->task_padded_sg = 1; + } - se_mem = se_mem_lout; - /* - * Setup the cmd->t_mem_bidi_list -> task->task_sg_bidi - * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI - * - * Note that the first call to transport_do_se_mem_map() above will - * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map() - * -> transport_init_task_sg(), and the second here will do the - * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI. - */ - if (task->task_sg_bidi != NULL) { - ret = transport_do_se_mem_map(dev, task, - &cmd->t_mem_bidi_list, NULL, - se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt, - &task_offset_in); - if (ret < 0) - goto out; + task->task_sg = kmalloc(sizeof(struct scatterlist) * \ + task->task_sg_num, GFP_KERNEL); + if (!task->task_sg) { + cmd->se_dev->transport->free_task(task); + return -ENOMEM; + } - se_mem_bidi = se_mem_bidi_lout; + sg_init_table(task->task_sg, task->task_sg_num); + + task_size = task->task_size; + + /* Build new sgl, only up to task_size */ + for_each_sg(task->task_sg, sg, task->task_sg_num, count) { + if (cmd_sg->length > task_size) + break; + + *sg = *cmd_sg; + task_size -= cmd_sg->length; + cmd_sg = sg_next(cmd_sg); } - task_cdbs++; - DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n", - task_cdbs, task->task_sg_num); - } + lba += task->task_sectors; + sectors -= task->task_sectors; - if (set_counts) { - atomic_inc(&cmd->t_fe_count); - atomic_inc(&cmd->t_se_count); + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task_list); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); } - DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n", - cmd->se_tfo->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE) - ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs); - - return task_cdbs; -out: - return 0; + return task_count; } static int -transport_map_control_cmd_to_task(struct se_cmd *cmd) +transport_allocate_control_task(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; unsigned char *cdb; struct se_task *task; - int ret; + unsigned long flags; task = transport_generic_get_task(cmd, cmd->data_direction); if (!task) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + return -ENOMEM; cdb = dev->transport->get_cdb(task); BUG_ON(!cdb); memcpy(cdb, cmd->t_task_cdb, scsi_command_size(cmd->t_task_cdb)); + task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, + GFP_KERNEL); + if (!task->task_sg) { + cmd->se_dev->transport->free_task(task); + return -ENOMEM; + } + + memcpy(task->task_sg, cmd->t_data_sg, + sizeof(struct scatterlist) * cmd->t_data_nents); task->task_size = cmd->data_length; - task->task_sg_num = - (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 
1 : 0; + task->task_sg_num = cmd->t_data_nents; - atomic_inc(&cmd->t_fe_count); - atomic_inc(&cmd->t_se_count); + spin_lock_irqsave(&cmd->t_state_lock, flags); + list_add_tail(&task->t_list, &cmd->t_task_list); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { - struct se_mem *se_mem = NULL, *se_mem_lout = NULL; - u32 se_mem_cnt = 0, task_offset = 0; - - if (!list_empty(&cmd->t_mem_list)) - se_mem = list_first_entry(&cmd->t_mem_list, - struct se_mem, se_list); - - ret = transport_do_se_mem_map(dev, task, - &cmd->t_mem_list, NULL, se_mem, - &se_mem_lout, &se_mem_cnt, &task_offset); - if (ret < 0) - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; - if (dev->transport->map_task_SG) return dev->transport->map_task_SG(task); return 0; @@ -4828,10 +4347,32 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) return 0; } else { BUG(); - return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; + return -ENOMEM; } } +static u32 transport_allocate_tasks( + struct se_cmd *cmd, + unsigned long long lba, + enum dma_data_direction data_direction, + struct scatterlist *sgl, + unsigned int sgl_nents) +{ + int ret; + + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + return transport_allocate_data_tasks(cmd, lba, data_direction, + sgl, sgl_nents); + } else { + ret = transport_allocate_control_task(cmd); + if (ret < 0) + return ret; + else + return 1; + } +} + + /* transport_generic_new_cmd(): Called from transport_processing_thread() * * Allocate storage transport resources from a set of values predefined @@ -4850,10 +4391,10 @@ int transport_generic_new_cmd(struct se_cmd *cmd) /* * Determine is the TCM fabric module has already allocated physical * memory, and is directly calling transport_generic_map_mem_to_cmd() - * to setup beforehand the linked list of physical memory at - * cmd->t_mem_list of struct se_mem->se_page + * beforehand. 
*/ - if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) { + if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && + cmd->data_length) { ret = transport_generic_get_mem(cmd); if (ret < 0) return ret; @@ -4863,19 +4404,13 @@ int transport_generic_new_cmd(struct se_cmd *cmd) if (ret < 0) return ret; - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (atomic_read(&task->task_sent)) - continue; - if (!dev->transport->map_task_SG) - continue; + list_for_each_entry(task, &cmd->t_task_list, t_list) { + if (atomic_read(&task->task_sent)) + continue; + if (!dev->transport->map_task_SG) + continue; - ret = dev->transport->map_task_SG(task); - if (ret < 0) - return ret; - } - } else { - ret = transport_map_control_cmd_to_task(cmd); + ret = dev->transport->map_task_SG(task); if (ret < 0) return ret; } diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 1017f56bbbcc..9365e53947ad 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -59,7 +59,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) struct fc_exch *ep; struct fc_seq *sp; struct se_cmd *se_cmd; - struct se_mem *mem; + struct scatterlist *sg; + int count; if (!(ft_debug_logging & FT_DEBUG_IO)) return; @@ -71,15 +72,16 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) caller, cmd, cmd->cdb); printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); - printk(KERN_INFO "%s: cmd %p se_num %u len %u se_cmd_flags <0x%x>\n", - caller, cmd, se_cmd->t_tasks_se_num, + printk(KERN_INFO "%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", + caller, cmd, se_cmd->t_data_nents, se_cmd->data_length, se_cmd->se_cmd_flags); - list_for_each_entry(mem, &se_cmd->t_mem_list, se_list) - printk(KERN_INFO "%s: cmd %p mem %p page %p " - "len 0x%x off 0x%x\n", - caller, cmd, mem, - mem->se_page, mem->se_len, mem->se_off); + for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) + printk(KERN_INFO "%s: cmd %p sg %p page %p " + "len 0x%x off 0x%x\n", + caller, cmd, sg, + sg_page(sg), sg->length, sg->offset); + sp = cmd->seq; if (sp) { ep = fc_seq_exch(sp); @@ -256,10 +258,9 @@ int ft_write_pending(struct se_cmd *se_cmd) (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) { if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { /* - * Map se_mem list to scatterlist, so that - * DDP can be setup. DDP setup function require - * scatterlist. se_mem_list is internal to - * TCM/LIO target + * cmd may have been broken up into multiple + * tasks. Link their sgs together so we can + * operate on them all at once. */ transport_do_task_sg_chain(se_cmd); cmd->sg = se_cmd->t_tasks_sg_chained; diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 837660728563..3563a9029c4a 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -68,17 +68,17 @@ int ft_queue_data_in(struct se_cmd *se_cmd) struct fc_frame *fp = NULL; struct fc_exch *ep; struct fc_lport *lport; - struct se_mem *mem; + struct scatterlist *sg = NULL; size_t remaining; u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF; - u32 mem_off; + u32 mem_off = 0; u32 fh_off = 0; u32 frame_off = 0; size_t frame_len = 0; - size_t mem_len; + size_t mem_len = 0; size_t tlen; size_t off_in_page; - struct page *page; + struct page *page = NULL; int use_sg; int error; void *page_addr; @@ -94,13 +94,12 @@ int ft_queue_data_in(struct se_cmd *se_cmd) /* * Setup to use first mem list entry, unless no data. 
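
/*
 * Editorial sketch (not part of the patch): the converted
 * ft_queue_data_in() and ft_recv_write_data() hunks in this commit walk
 * the flat scatterlist with a (page, mem_off, mem_len) cursor,
 * refilling it from sg_next() once the current entry is exhausted.
 * The same cursor pattern over a plain array; sk_seg and the 2 KiB
 * per-frame payload budget are invented for illustration, and the
 * page/offset bookkeeping of the real code is omitted.
 */
#include <stdio.h>

struct sk_seg {
	size_t length;
};

int main(void)
{
	struct sk_seg segs[] = { { 4096 }, { 4096 }, { 1024 } };
	unsigned int si = 0;
	size_t remaining = 4096 + 4096 + 1024;
	size_t mem_len = segs[0].length;	/* cursor state */
	size_t frame_payload = 2048;		/* per-frame budget */

	while (remaining) {
		size_t tlen;

		if (!mem_len) {			/* sg = sg_next(sg) */
			si++;
			mem_len = segs[si].length;
		}
		tlen = frame_payload;
		if (tlen > mem_len)		/* stop at segment end */
			tlen = mem_len;
		if (tlen > remaining)
			tlen = remaining;
		printf("copy %zu bytes from seg %u\n", tlen, si);
		mem_len -= tlen;
		remaining -= tlen;
	}
	return 0;
}
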
 */
-	BUG_ON(remaining && list_empty(&se_cmd->t_mem_list));
+	BUG_ON(remaining && !se_cmd->t_data_sg);
 	if (remaining) {
-		mem = list_first_entry(&se_cmd->t_mem_list,
-			struct se_mem, se_list);
-		mem_len = mem->se_len;
-		mem_off = mem->se_off;
-		page = mem->se_page;
+		sg = se_cmd->t_data_sg;
+		mem_len = sg->length;
+		mem_off = sg->offset;
+		page = sg_page(sg);
 	}
 	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
@@ -108,12 +107,10 @@
 	while (remaining) {
 		if (!mem_len) {
-			BUG_ON(!mem);
-			mem = list_entry(mem->se_list.next,
-				struct se_mem, se_list);
-			mem_len = min((size_t)mem->se_len, remaining);
-			mem_off = mem->se_off;
-			page = mem->se_page;
+			sg = sg_next(sg);
+			mem_len = min((size_t)sg->length, remaining);
+			mem_off = sg->offset;
+			page = sg_page(sg);
 		}
 		if (!frame_len) {
 			/*
@@ -200,13 +197,13 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	struct fc_exch *ep;
 	struct fc_lport *lport;
 	struct fc_frame_header *fh;
-	struct se_mem *mem;
-	u32 mem_off;
+	struct scatterlist *sg = NULL;
+	u32 mem_off = 0;
 	u32 rel_off;
 	size_t frame_len;
-	size_t mem_len;
+	size_t mem_len = 0;
 	size_t tlen;
-	struct page *page;
+	struct page *page = NULL;
 	void *page_addr;
 	void *from;
 	void *to;
@@ -288,23 +285,20 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 	/*
 	 * Setup to use first mem list entry, unless no data.
 	 */
-	BUG_ON(frame_len && list_empty(&se_cmd->t_mem_list));
+	BUG_ON(frame_len && !se_cmd->t_data_sg);
 	if (frame_len) {
-		mem = list_first_entry(&se_cmd->t_mem_list,
-			struct se_mem, se_list);
-		mem_len = mem->se_len;
-		mem_off = mem->se_off;
-		page = mem->se_page;
+		sg = se_cmd->t_data_sg;
+		mem_len = sg->length;
+		mem_off = sg->offset;
+		page = sg_page(sg);
 	}
 	while (frame_len) {
 		if (!mem_len) {
-			BUG_ON(!mem);
-			mem = list_entry(mem->se_list.next,
-				struct se_mem, se_list);
-			mem_len = mem->se_len;
-			mem_off = mem->se_off;
-			page = mem->se_page;
+			sg = sg_next(sg);
+			mem_len = sg->length;
+			mem_off = sg->offset;
+			page = sg_page(sg);
		}
		if (rel_off >= mem_len) {
			rel_off -= mem_len;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index d97618a2ee95..465f266a0205 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -490,8 +490,6 @@ struct se_cmd {
 	int			t_tasks_failed;
 	int			t_tasks_fua;
 	bool			t_tasks_bidi;
-	u32			t_tasks_se_num;
-	u32			t_tasks_se_bidi_num;
 	u32			t_tasks_sg_chained_no;
 	atomic_t		t_fe_count;
 	atomic_t		t_se_count;
@@ -523,9 +521,13 @@ struct se_cmd {
 	 */
 	struct scatterlist	*t_task_pt_sgl;
 	u32			t_task_pt_sgl_num;
-	struct list_head	t_mem_list;
+
+	struct scatterlist	*t_data_sg;
+	unsigned int		t_data_nents;
+	struct scatterlist	*t_bidi_data_sg;
+	unsigned int		t_bidi_data_nents;
+
 	/* Used for BIDI READ */
-	struct list_head	t_mem_bidi_list;
 	struct list_head	t_task_list;
 	u32			t_task_list_num;
-- cgit v1.2.3

From 6708bb27bb2703da238f21f516034263348af5be Mon Sep 17 00:00:00 2001
From: Andy Grover
Date: Wed, 8 Jun 2011 10:36:43 -0700
Subject: target: Follow up core updates from AGrover and HCH (round 4)

This patch contains the squashed version of the fourth round of series
cleanups from Andy and Christoph, following the heavy lifting in the
preceding 'Eliminate usage of struct se_mem' and 'Make all control CDBs
scatter-gather' changes.  This also includes a conversion of target core
and the v3.0 mainline fabric modules (loopback and tcm_fc) to use pr_debug
and the CONFIG_DYNAMIC_DEBUG infrastructure!
These have been squashed into this third and final round for v3.1. target: Remove ifdeffed code in t_g_process_write target: Remove direct ramdisk code target: Rename task_sg_num to task_sg_nents target: Remove custom debug macros for pr_debug. Use pr_err(). target: Remove custom debug macros in mainline fabrics target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0 target: Remove transport do_se_mem_map callback target: Further simplify transport_free_pages target: Redo task allocation return value handling target: Remove extra parentheses target: change alloc_task call to take *cdb, not *cmd (nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev) Signed-off-by: Andy Grover Reviewed-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/loopback/Kconfig | 6 - drivers/target/loopback/tcm_loop.c | 94 ++-- drivers/target/loopback/tcm_loop.h | 6 - drivers/target/target_core_alua.c | 168 +++---- drivers/target/target_core_cdb.c | 50 +- drivers/target/target_core_configfs.c | 464 +++++++++--------- drivers/target/target_core_device.c | 214 ++++----- drivers/target/target_core_fabric_configfs.c | 110 ++--- drivers/target/target_core_fabric_lib.c | 10 +- drivers/target/target_core_file.c | 100 ++-- drivers/target/target_core_file.h | 2 - drivers/target/target_core_hba.c | 12 +- drivers/target/target_core_iblock.c | 116 +++-- drivers/target/target_core_iblock.h | 1 - drivers/target/target_core_pr.c | 454 +++++++++--------- drivers/target/target_core_pscsi.c | 183 +++---- drivers/target/target_core_pscsi.h | 3 +- drivers/target/target_core_rd.c | 453 +++--------------- drivers/target/target_core_rd.h | 2 - drivers/target/target_core_tmr.c | 59 +-- drivers/target/target_core_tpg.c | 75 ++- drivers/target/target_core_transport.c | 690 ++++++++++----------------- drivers/target/target_core_ua.c | 30 +- drivers/target/tcm_fc/tcm_fc.h | 24 - drivers/target/tcm_fc/tfc_cmd.c | 37 +- drivers/target/tcm_fc/tfc_conf.c | 33 +- drivers/target/tcm_fc/tfc_io.c | 8 +- drivers/target/tcm_fc/tfc_sess.c | 18 +- include/target/target_core_base.h | 2 +- include/target/target_core_transport.h | 7 +- 30 files changed, 1387 insertions(+), 2044 deletions(-) (limited to 'include') diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig index 57dcbc2d711b..abe8ecbcdf06 100644 --- a/drivers/target/loopback/Kconfig +++ b/drivers/target/loopback/Kconfig @@ -3,9 +3,3 @@ config LOOPBACK_TARGET help Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD fabric loopback module. 
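
/*
 * Editorial sketch (not part of the patch): the hunks that follow
 * replace bare printk(KERN_ERR/KERN_INFO ...) calls with the kernel's
 * pr_err() and pr_debug() helpers; pr_debug() compiles out, or is
 * gated at runtime under CONFIG_DYNAMIC_DEBUG, which is what lets the
 * LOOPBACK_TARGET_CDB_DEBUG Kconfig switch and TL_CDB_DEBUG macro be
 * deleted just below.  A userspace approximation of those semantics,
 * with a DEBUG define standing in for the dynamic-debug machinery and
 * the sk_pr_* names invented for this sketch:
 */
#include <stdio.h>

#define sk_pr_err(fmt, ...) \
	fprintf(stderr, "tcm_loop: " fmt, ##__VA_ARGS__)

#ifdef DEBUG
#define sk_pr_debug(fmt, ...) \
	fprintf(stderr, "tcm_loop: " fmt, ##__VA_ARGS__)
#else
#define sk_pr_debug(fmt, ...) do { } while (0)	/* compiled out */
#endif

int main(void)
{
	sk_pr_err("Unable to allocate struct tcm_loop_cmd\n");
	sk_pr_debug("visible only when built with -DDEBUG\n");
	return 0;
}
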
- -config LOOPBACK_TARGET_CDB_DEBUG - bool "TCM loopback fabric module CDB debug code" - depends on LOOPBACK_TARGET - help - Say Y here to enable the TCM loopback fabric module CDB debug code diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 99603bc45786..aa2d67997235 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -79,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd( tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC); if (!tl_cmd) { - printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n"); + pr_err("Unable to allocate struct tcm_loop_cmd\n"); set_host_byte(sc, DID_ERROR); return NULL; } @@ -281,7 +281,7 @@ static int tcm_loop_queuecommand( struct tcm_loop_hba *tl_hba; struct tcm_loop_tpg *tl_tpg; - TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" + pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x" " scsi_buf_len: %u\n", sc->device->host->host_no, sc->device->id, sc->device->channel, sc->device->lun, sc->cmnd[0], scsi_bufflen(sc)); @@ -331,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) */ tl_nexus = tl_hba->tl_nexus; if (!tl_nexus) { - printk(KERN_ERR "Unable to perform device reset without" + pr_err("Unable to perform device reset without" " active I_T Nexus\n"); return FAILED; } @@ -344,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc) tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL); if (!tl_cmd) { - printk(KERN_ERR "Unable to allocate memory for tl_cmd\n"); + pr_err("Unable to allocate memory for tl_cmd\n"); return FAILED; } tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); if (!tl_tmr) { - printk(KERN_ERR "Unable to allocate memory for tl_tmr\n"); + pr_err("Unable to allocate memory for tl_tmr\n"); goto release; } init_waitqueue_head(&tl_tmr->tl_tmr_wait); @@ -435,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev) sh = scsi_host_alloc(&tcm_loop_driver_template, sizeof(struct tcm_loop_hba)); if (!sh) { - printk(KERN_ERR "Unable to allocate struct scsi_host\n"); + pr_err("Unable to allocate struct scsi_host\n"); return -ENODEV; } tl_hba->sh = sh; @@ -454,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev) error = scsi_add_host(sh, &tl_hba->dev); if (error) { - printk(KERN_ERR "%s: scsi_add_host failed\n", __func__); + pr_err("%s: scsi_add_host failed\n", __func__); scsi_host_put(sh); return -ENODEV; } @@ -495,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host ret = device_register(&tl_hba->dev); if (ret) { - printk(KERN_ERR "device_register() failed for" + pr_err("device_register() failed for" " tl_hba->dev: %d\n", ret); return -ENODEV; } @@ -513,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void) tcm_loop_primary = root_device_register("tcm_loop_0"); if (IS_ERR(tcm_loop_primary)) { - printk(KERN_ERR "Unable to allocate tcm_loop_primary\n"); + pr_err("Unable to allocate tcm_loop_primary\n"); return PTR_ERR(tcm_loop_primary); } ret = bus_register(&tcm_loop_lld_bus); if (ret) { - printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n"); + pr_err("bus_register() failed for tcm_loop_lld_bus\n"); goto dev_unreg; } ret = driver_register(&tcm_loop_driverfs); if (ret) { - printk(KERN_ERR "driver_register() failed for" + pr_err("driver_register() failed for" "tcm_loop_driverfs\n"); goto bus_unreg; } - printk(KERN_INFO "Initialized TCM Loop Core Bus\n"); + pr_debug("Initialized TCM Loop Core Bus\n"); return ret; bus_unreg: 
@@ -546,7 +546,7 @@ static void tcm_loop_release_core_bus(void) bus_unregister(&tcm_loop_lld_bus); root_device_unregister(tcm_loop_primary); - printk(KERN_INFO "Releasing TCM Loop Core BUS\n"); + pr_debug("Releasing TCM Loop Core BUS\n"); } static char *tcm_loop_get_fabric_name(void) @@ -574,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg) case SCSI_PROTOCOL_ISCSI: return iscsi_get_fabric_proto_ident(se_tpg); default: - printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" + pr_err("Unknown tl_proto_id: 0x%02x, using" " SAS emulation\n", tl_hba->tl_proto_id); break; } @@ -630,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id( return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, format_code, buf); default: - printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" + pr_err("Unknown tl_proto_id: 0x%02x, using" " SAS emulation\n", tl_hba->tl_proto_id); break; } @@ -660,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len( return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, format_code); default: - printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" + pr_err("Unknown tl_proto_id: 0x%02x, using" " SAS emulation\n", tl_hba->tl_proto_id); break; } @@ -694,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id( return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, port_nexus_ptr); default: - printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using" + pr_err("Unknown tl_proto_id: 0x%02x, using" " SAS emulation\n", tl_hba->tl_proto_id); break; } @@ -743,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl( tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL); if (!tl_nacl) { - printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n"); + pr_err("Unable to allocate struct tcm_loop_nacl\n"); return NULL; } @@ -853,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; - TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p" + pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p" " cdb: 0x%02x\n", sc, sc->cmnd[0]); sc->result = SAM_STAT_GOOD; @@ -868,7 +868,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) struct tcm_loop_cmd, tl_se_cmd); struct scsi_cmnd *sc = tl_cmd->sc; - TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p" + pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p" " cdb: 0x%02x\n", sc, sc->cmnd[0]); if (se_cmd->sense_buffer && @@ -943,7 +943,7 @@ static int tcm_loop_port_link( */ scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); - printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n"); + pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n"); return 0; } @@ -961,7 +961,7 @@ static void tcm_loop_port_unlink( sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); if (!sd) { - printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:" + pr_err("Unable to locate struct scsi_device for %d:%d:" "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun); return; } @@ -974,7 +974,7 @@ static void tcm_loop_port_unlink( atomic_dec(&tl_tpg->tl_tpg_port_count); smp_mb__after_atomic_dec(); - printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n"); + pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); } /* End items for tcm_loop_port_cit */ @@ -991,14 +991,14 @@ static int tcm_loop_make_nexus( int ret = -ENOMEM; if (tl_tpg->tl_hba->tl_nexus) { - printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n"); + 
pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n"); return -EEXIST; } se_tpg = &tl_tpg->tl_se_tpg; tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL); if (!tl_nexus) { - printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n"); + pr_err("Unable to allocate struct tcm_loop_nexus\n"); return -ENOMEM; } /* @@ -1027,7 +1027,7 @@ static int tcm_loop_make_nexus( __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl, tl_nexus->se_sess, tl_nexus); tl_tpg->tl_hba->tl_nexus = tl_nexus; - printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated" + pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), name); return 0; @@ -1053,13 +1053,13 @@ static int tcm_loop_drop_nexus( return -ENODEV; if (atomic_read(&tpg->tl_tpg_port_count)) { - printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with" + pr_err("Unable to remove TCM_Loop I_T Nexus with" " active TPG port count: %d\n", atomic_read(&tpg->tl_tpg_port_count)); return -EPERM; } - printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" + pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated" " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba), tl_nexus->se_sess->se_node_acl->initiatorname); /* @@ -1115,7 +1115,7 @@ static ssize_t tcm_loop_tpg_store_nexus( * tcm_loop_make_nexus() */ if (strlen(page) >= TL_WWN_ADDR_LEN) { - printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds" + pr_err("Emulated NAA Sas Address: %s, exceeds" " max: %d\n", page, TL_WWN_ADDR_LEN); return -EINVAL; } @@ -1124,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus( ptr = strstr(i_port, "naa."); if (ptr) { if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) { - printk(KERN_ERR "Passed SAS Initiator Port %s does not" + pr_err("Passed SAS Initiator Port %s does not" " match target port protoid: %s\n", i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; @@ -1135,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus( ptr = strstr(i_port, "fc."); if (ptr) { if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) { - printk(KERN_ERR "Passed FCP Initiator Port %s does not" + pr_err("Passed FCP Initiator Port %s does not" " match target port protoid: %s\n", i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; @@ -1146,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus( ptr = strstr(i_port, "iqn."); if (ptr) { if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) { - printk(KERN_ERR "Passed iSCSI Initiator Port %s does not" + pr_err("Passed iSCSI Initiator Port %s does not" " match target port protoid: %s\n", i_port, tcm_loop_dump_proto_id(tl_hba)); return -EINVAL; @@ -1154,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus( port_ptr = &i_port[0]; goto check_newline; } - printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:" + pr_err("Unable to locate prefix for emulated Initiator Port:" " %s\n", i_port); return -EINVAL; /* @@ -1194,7 +1194,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg( tpgt_str = strstr(name, "tpgt_"); if (!tpgt_str) { - printk(KERN_ERR "Unable to locate \"tpgt_#\" directory" + pr_err("Unable to locate \"tpgt_#\" directory" " group\n"); return ERR_PTR(-EINVAL); } @@ -1202,7 +1202,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg( tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0); if (tpgt >= TL_TPGS_PER_HBA) { - printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" + pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:" " %u\n", tpgt, TL_TPGS_PER_HBA); return ERR_PTR(-EINVAL); } @@ -1218,7 
+1218,7 @@ struct se_portal_group *tcm_loop_make_naa_tpg( if (ret < 0) return ERR_PTR(-ENOMEM); - printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s" + pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s" " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), config_item_name(&wwn->wwn_group.cg_item), tpgt); @@ -1245,7 +1245,7 @@ void tcm_loop_drop_naa_tpg( */ core_tpg_deregister(se_tpg); - printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s" + pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s" " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba), config_item_name(&wwn->wwn_group.cg_item), tpgt); } @@ -1266,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba( tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL); if (!tl_hba) { - printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n"); + pr_err("Unable to allocate struct tcm_loop_hba\n"); return ERR_PTR(-ENOMEM); } /* @@ -1286,7 +1286,7 @@ struct se_wwn *tcm_loop_make_scsi_hba( } ptr = strstr(name, "iqn."); if (!ptr) { - printk(KERN_ERR "Unable to locate prefix for emulated Target " + pr_err("Unable to locate prefix for emulated Target " "Port: %s\n", name); ret = -EINVAL; goto out; @@ -1295,7 +1295,7 @@ struct se_wwn *tcm_loop_make_scsi_hba( check_len: if (strlen(name) >= TL_WWN_ADDR_LEN) { - printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds" + pr_err("Emulated NAA %s Address: %s, exceeds" " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba), TL_WWN_ADDR_LEN); ret = -EINVAL; @@ -1314,7 +1314,7 @@ check_len: sh = tl_hba->sh; tcm_loop_hba_no_cnt++; - printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target" + pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target" " %s Address: %s at Linux/SCSI Host ID: %d\n", tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); @@ -1337,7 +1337,7 @@ void tcm_loop_drop_scsi_hba( */ device_unregister(&tl_hba->dev); - printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target" + pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target" " SAS Address: %s at Linux/SCSI Host ID: %d\n", config_item_name(&wwn->wwn_group.cg_item), host_no); } @@ -1373,7 +1373,7 @@ static int tcm_loop_register_configfs(void) */ fabric = target_fabric_configfs_init(THIS_MODULE, "loopback"); if (IS_ERR(fabric)) { - printk(KERN_ERR "tcm_loop_register_configfs() failed!\n"); + pr_err("tcm_loop_register_configfs() failed!\n"); return PTR_ERR(fabric); } /* @@ -1464,7 +1464,7 @@ static int tcm_loop_register_configfs(void) */ ret = target_fabric_configfs_register(fabric); if (ret < 0) { - printk(KERN_ERR "target_fabric_configfs_register() for" + pr_err("target_fabric_configfs_register() for" " TCM_Loop failed!\n"); target_fabric_configfs_free(fabric); return -1; @@ -1473,7 +1473,7 @@ static int tcm_loop_register_configfs(void) * Setup our local pointer to *fabric. 
*/ tcm_loop_fabric_configfs = fabric; - printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->" + pr_debug("TCM_LOOP[0] - Set fabric ->" " tcm_loop_fabric_configfs\n"); return 0; } @@ -1485,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void) target_fabric_configfs_deregister(tcm_loop_fabric_configfs); tcm_loop_fabric_configfs = NULL; - printk(KERN_INFO "TCM_LOOP[0] - Cleared" + pr_debug("TCM_LOOP[0] - Cleared" " tcm_loop_fabric_configfs\n"); } @@ -1498,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void) __alignof__(struct tcm_loop_cmd), 0, NULL); if (!tcm_loop_cmd_cache) { - printk(KERN_ERR "kmem_cache_create() for" + pr_debug("kmem_cache_create() for" " tcm_loop_cmd_cache failed\n"); return -ENOMEM; } diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index 7e9f7ab45548..6b76c7a22bb0 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -16,12 +16,6 @@ */ #define TL_SCSI_MAX_CMD_LEN 32 -#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG -# define TL_CDB_DEBUG(x...) printk(KERN_INFO x) -#else -# define TL_CDB_DEBUG(x...) -#endif - struct tcm_loop_cmd { /* State of Linux/SCSI CDB+Data descriptor */ u32 sc_cmd_state; diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index dba412ff3718..98c98a3a0250 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -167,7 +167,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) int alua_access_state, primary = 0, rc; u16 tg_pt_id, rtpi; - if (!(l_port)) + if (!l_port) return PYX_TRANSPORT_LU_COMM_FAILURE; buf = transport_kmap_first_data_page(cmd); @@ -177,24 +177,24 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) * for the local tg_pt_gp. */ l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem; - if (!(l_tg_pt_gp_mem)) { - printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); + if (!l_tg_pt_gp_mem) { + pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n"); rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; goto out; } spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp; - if (!(l_tg_pt_gp)) { + if (!l_tg_pt_gp) { spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); - printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); + pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n"); rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; goto out; } rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA); spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock); - if (!(rc)) { - printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS" + if (!rc) { + pr_debug("Unable to process SET_TARGET_PORT_GROUPS" " while TPGS_EXPLICT_ALUA is disabled\n"); rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; goto out; @@ -249,7 +249,7 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd) list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { - if (!(tg_pt_gp->tg_pt_gp_valid_id)) + if (!tg_pt_gp->tg_pt_gp_valid_id) continue; if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) @@ -498,7 +498,7 @@ static int core_alua_state_check( struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; int out_alua_state, nonop_delay_msecs; - if (!(port)) + if (!port) return 0; /* * First, check for a struct se_port specific secondary ALUA target port @@ -506,7 +506,7 @@ static int core_alua_state_check( */ if (atomic_read(&port->sep_tg_pt_secondary_offline)) { *alua_ascq = ASCQ_04H_ALUA_OFFLINE; - printk(KERN_INFO "ALUA: Got secondary offline status for local" + pr_debug("ALUA: Got secondary offline status 
for local" " target port\n"); *alua_ascq = ASCQ_04H_ALUA_OFFLINE; return 1; @@ -548,7 +548,7 @@ static int core_alua_state_check( */ case ALUA_ACCESS_STATE_OFFLINE: default: - printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", + pr_err("Unknown ALUA access state: 0x%02x\n", out_alua_state); return -EINVAL; } @@ -580,7 +580,7 @@ static int core_alua_check_transition(int state, int *primary) *primary = 0; break; default: - printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state); + pr_err("Unknown ALUA access state: 0x%02x\n", state); return -EINVAL; } @@ -638,7 +638,7 @@ int core_alua_check_nonop_delay( * The ALUA Active/NonOptimized access state delay can be disabled * in via configfs with a value of zero */ - if (!(cmd->alua_nonop_delay)) + if (!cmd->alua_nonop_delay) return 0; /* * struct se_cmd->alua_nonop_delay gets set by a target port group @@ -667,7 +667,7 @@ static int core_alua_write_tpg_metadata( file = filp_open(path, flags, 0600); if (IS_ERR(file) || !file || !file->f_dentry) { - printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n", + pr_err("filp_open(%s) for ALUA metadata failed\n", path); return -ENODEV; } @@ -681,7 +681,7 @@ static int core_alua_write_tpg_metadata( set_fs(old_fs); if (ret < 0) { - printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path); + pr_err("Error writing ALUA metadata file: %s\n", path); filp_close(file, NULL); return -EIO; } @@ -778,7 +778,7 @@ static int core_alua_do_transition_tg_pt( * se_deve->se_lun_acl pointer may be NULL for a * entry created without explict Node+MappedLUN ACLs */ - if (!(lacl)) + if (!lacl) continue; if (explict && @@ -820,7 +820,7 @@ static int core_alua_do_transition_tg_pt( */ atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state); - printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" + pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" " from primary access state %s to %s\n", (explict) ? "explict" : "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state), @@ -851,8 +851,8 @@ int core_alua_do_port_transition( return -EINVAL; md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL); - if (!(md_buf)) { - printk("Unable to allocate buf for ALUA metadata\n"); + if (!md_buf) { + pr_err("Unable to allocate buf for ALUA metadata\n"); return -ENOMEM; } @@ -867,7 +867,7 @@ int core_alua_do_port_transition( * we only do transition on the passed *l_tp_pt_gp, and not * on all of the matching target port groups IDs in default_lu_gp. */ - if (!(lu_gp->lu_gp_id)) { + if (!lu_gp->lu_gp_id) { /* * core_alua_do_transition_tg_pt() will always return * success. @@ -899,7 +899,7 @@ int core_alua_do_port_transition( &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { - if (!(tg_pt_gp->tg_pt_gp_valid_id)) + if (!tg_pt_gp->tg_pt_gp_valid_id) continue; /* * If the target behavior port asymmetric access state @@ -941,7 +941,7 @@ int core_alua_do_port_transition( } spin_unlock(&lu_gp->lu_gp_lock); - printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT" + pr_debug("Successfully processed LU Group: %s all ALUA TG PT" " Group IDs: %hu %s transition to primary state: %s\n", config_item_name(&lu_gp->lu_gp_group.cg_item), l_tg_pt_gp->tg_pt_gp_id, (explict) ? 
"explict" : "implict", @@ -1001,9 +1001,9 @@ static int core_alua_set_tg_pt_secondary_state( spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; - if (!(tg_pt_gp)) { + if (!tg_pt_gp) { spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); - printk(KERN_ERR "Unable to complete secondary state" + pr_err("Unable to complete secondary state" " transition\n"); return -EINVAL; } @@ -1022,7 +1022,7 @@ static int core_alua_set_tg_pt_secondary_state( ALUA_STATUS_ALTERED_BY_EXPLICT_STPG : ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA; - printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu" + pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu" " to secondary access state: %s\n", (explict) ? "explict" : "implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); @@ -1040,8 +1040,8 @@ static int core_alua_set_tg_pt_secondary_state( */ if (port->sep_tg_pt_secondary_write_md) { md_buf = kzalloc(md_buf_len, GFP_KERNEL); - if (!(md_buf)) { - printk(KERN_ERR "Unable to allocate md_buf for" + if (!md_buf) { + pr_err("Unable to allocate md_buf for" " secondary ALUA access metadata\n"); return -ENOMEM; } @@ -1062,8 +1062,8 @@ core_alua_allocate_lu_gp(const char *name, int def_group) struct t10_alua_lu_gp *lu_gp; lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL); - if (!(lu_gp)) { - printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n"); + if (!lu_gp) { + pr_err("Unable to allocate struct t10_alua_lu_gp\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&lu_gp->lu_gp_node); @@ -1088,14 +1088,14 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id) * The lu_gp->lu_gp_id may only be set once.. */ if (lu_gp->lu_gp_valid_id) { - printk(KERN_WARNING "ALUA LU Group already has a valid ID," + pr_warn("ALUA LU Group already has a valid ID," " ignoring request\n"); return -EINVAL; } spin_lock(&lu_gps_lock); if (alua_lu_gps_count == 0x0000ffff) { - printk(KERN_ERR "Maximum ALUA alua_lu_gps_count:" + pr_err("Maximum ALUA alua_lu_gps_count:" " 0x0000ffff reached\n"); spin_unlock(&lu_gps_lock); kmem_cache_free(t10_alua_lu_gp_cache, lu_gp); @@ -1107,10 +1107,10 @@ again: list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) { if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) { - if (!(lu_gp_id)) + if (!lu_gp_id) goto again; - printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu" + pr_warn("ALUA Logical Unit Group ID: %hu" " already exists, ignoring request\n", lu_gp_id); spin_unlock(&lu_gps_lock); @@ -1133,8 +1133,8 @@ core_alua_allocate_lu_gp_mem(struct se_device *dev) struct t10_alua_lu_gp_member *lu_gp_mem; lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL); - if (!(lu_gp_mem)) { - printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n"); + if (!lu_gp_mem) { + pr_err("Unable to allocate struct t10_alua_lu_gp_member\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list); @@ -1218,7 +1218,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev) return; lu_gp_mem = dev->dev_alua_lu_gp_mem; - if (!(lu_gp_mem)) + if (!lu_gp_mem) return; while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt)) @@ -1226,7 +1226,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev) spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; - if ((lu_gp)) { + if (lu_gp) { spin_lock(&lu_gp->lu_gp_lock); if (lu_gp_mem->lu_gp_assoc) { list_del(&lu_gp_mem->lu_gp_mem_list); @@ -1248,10 +1248,10 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name) 
spin_lock(&lu_gps_lock); list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) { - if (!(lu_gp->lu_gp_valid_id)) + if (!lu_gp->lu_gp_valid_id) continue; ci = &lu_gp->lu_gp_group.cg_item; - if (!(strcmp(config_item_name(ci), name))) { + if (!strcmp(config_item_name(ci), name)) { atomic_inc(&lu_gp->lu_gp_ref_cnt); spin_unlock(&lu_gps_lock); return lu_gp; @@ -1307,8 +1307,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp( struct t10_alua_tg_pt_gp *tg_pt_gp; tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL); - if (!(tg_pt_gp)) { - printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n"); + if (!tg_pt_gp) { + pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n"); return NULL; } INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list); @@ -1356,14 +1356,14 @@ int core_alua_set_tg_pt_gp_id( * The tg_pt_gp->tg_pt_gp_id may only be set once.. */ if (tg_pt_gp->tg_pt_gp_valid_id) { - printk(KERN_WARNING "ALUA TG PT Group already has a valid ID," + pr_warn("ALUA TG PT Group already has a valid ID," " ignoring request\n"); return -EINVAL; } spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) { - printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:" + pr_err("Maximum ALUA alua_tg_pt_gps_count:" " 0x0000ffff reached\n"); spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); @@ -1376,10 +1376,10 @@ again: list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) { - if (!(tg_pt_gp_id)) + if (!tg_pt_gp_id) goto again; - printk(KERN_ERR "ALUA Target Port Group ID: %hu already" + pr_err("ALUA Target Port Group ID: %hu already" " exists, ignoring request\n", tg_pt_gp_id); spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); return -EINVAL; @@ -1403,8 +1403,8 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem( tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache, GFP_KERNEL); - if (!(tg_pt_gp_mem)) { - printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n"); + if (!tg_pt_gp_mem) { + pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list); @@ -1491,7 +1491,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port) return; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; - if (!(tg_pt_gp_mem)) + if (!tg_pt_gp_mem) return; while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt)) @@ -1499,7 +1499,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port) spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; - if ((tg_pt_gp)) { + if (tg_pt_gp) { spin_lock(&tg_pt_gp->tg_pt_gp_lock); if (tg_pt_gp_mem->tg_pt_gp_assoc) { list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list); @@ -1524,10 +1524,10 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name( spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, tg_pt_gp_list) { - if (!(tg_pt_gp->tg_pt_gp_valid_id)) + if (!tg_pt_gp->tg_pt_gp_valid_id) continue; ci = &tg_pt_gp->tg_pt_gp_group.cg_item; - if (!(strcmp(config_item_name(ci), name))) { + if (!strcmp(config_item_name(ci), name)) { atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock); return tg_pt_gp; @@ -1592,12 +1592,12 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page) return len; tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; - if (!(tg_pt_gp_mem)) + if (!tg_pt_gp_mem) return len; 
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; - if ((tg_pt_gp)) { + if (tg_pt_gp) { tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" " %hu\nTG Port Primary Access State: %s\nTG Port " @@ -1634,7 +1634,7 @@ ssize_t core_alua_store_tg_pt_gp_info( lun = port->sep_lun; if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { - printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for" + pr_warn("SPC3_ALUA_EMULATED not enabled for" " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpg->se_tpg_tfo->tpg_get_tag(tpg), config_item_name(&lun->lun_group.cg_item)); @@ -1642,7 +1642,7 @@ ssize_t core_alua_store_tg_pt_gp_info( } if (count > TG_PT_GROUP_NAME_BUF) { - printk(KERN_ERR "ALUA Target Port Group alias too large!\n"); + pr_err("ALUA Target Port Group alias too large!\n"); return -EINVAL; } memset(buf, 0, TG_PT_GROUP_NAME_BUF); @@ -1659,26 +1659,26 @@ ssize_t core_alua_store_tg_pt_gp_info( */ tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev, strstrip(buf)); - if (!(tg_pt_gp_new)) + if (!tg_pt_gp_new) return -ENODEV; } tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem; - if (!(tg_pt_gp_mem)) { + if (!tg_pt_gp_mem) { if (tg_pt_gp_new) core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); - printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); + pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n"); return -EINVAL; } spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; - if ((tg_pt_gp)) { + if (tg_pt_gp) { /* * Clearing an existing tg_pt_gp association, and replacing * with the default_tg_pt_gp. */ - if (!(tg_pt_gp_new)) { - printk(KERN_INFO "Target_Core_ConfigFS: Moving" + if (!tg_pt_gp_new) { + pr_debug("Target_Core_ConfigFS: Moving" " %s/tpgt_%hu/%s from ALUA Target Port Group:" " alua/%s, ID: %hu back to" " default_tg_pt_gp\n", @@ -1707,7 +1707,7 @@ ssize_t core_alua_store_tg_pt_gp_info( */ __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); - printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" + pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA" " Target Port Group: alua/%s, ID: %hu\n", (move) ? 
"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpg->se_tpg_tfo->tpg_get_tag(tpg), @@ -1744,11 +1744,11 @@ ssize_t core_alua_store_access_type( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract alua_access_type\n"); + pr_err("Unable to extract alua_access_type\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) { - printk(KERN_ERR "Illegal value for alua_access_type:" + pr_err("Illegal value for alua_access_type:" " %lu\n", tmp); return -EINVAL; } @@ -1782,11 +1782,11 @@ ssize_t core_alua_store_nonop_delay_msecs( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract nonop_delay_msecs\n"); + pr_err("Unable to extract nonop_delay_msecs\n"); return -EINVAL; } if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) { - printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds" + pr_err("Passed nonop_delay_msecs: %lu, exceeds" " ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, ALUA_MAX_NONOP_DELAY_MSECS); return -EINVAL; @@ -1813,11 +1813,11 @@ ssize_t core_alua_store_trans_delay_msecs( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract trans_delay_msecs\n"); + pr_err("Unable to extract trans_delay_msecs\n"); return -EINVAL; } if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) { - printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds" + pr_err("Passed trans_delay_msecs: %lu, exceeds" " ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, ALUA_MAX_TRANS_DELAY_MSECS); return -EINVAL; @@ -1844,11 +1844,11 @@ ssize_t core_alua_store_preferred_bit( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract preferred ALUA value\n"); + pr_err("Unable to extract preferred ALUA value\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { - printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp); + pr_err("Illegal value for preferred ALUA: %lu\n", tmp); return -EINVAL; } tg_pt_gp->tg_pt_gp_pref = (int)tmp; @@ -1858,7 +1858,7 @@ ssize_t core_alua_store_preferred_bit( ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page) { - if (!(lun->lun_sep)) + if (!lun->lun_sep) return -ENODEV; return sprintf(page, "%d\n", @@ -1874,22 +1874,22 @@ ssize_t core_alua_store_offline_bit( unsigned long tmp; int ret; - if (!(lun->lun_sep)) + if (!lun->lun_sep) return -ENODEV; ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n"); + pr_err("Unable to extract alua_tg_pt_offline value\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { - printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n", + pr_err("Illegal value for alua_tg_pt_offline: %lu\n", tmp); return -EINVAL; } tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem; - if (!(tg_pt_gp_mem)) { - printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n"); + if (!tg_pt_gp_mem) { + pr_err("Unable to locate *tg_pt_gp_mem\n"); return -EINVAL; } @@ -1918,13 +1918,13 @@ ssize_t core_alua_store_secondary_status( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract alua_tg_pt_status\n"); + pr_err("Unable to extract alua_tg_pt_status\n"); return -EINVAL; } if ((tmp != ALUA_STATUS_NONE) && (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { - printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n", + pr_err("Illegal value for alua_tg_pt_status: %lu\n", tmp); return -EINVAL; } @@ -1951,11 +1951,11 @@ ssize_t core_alua_store_secondary_write_metadata( ret = strict_strtoul(page, 0, &tmp); if (ret 
< 0) { - printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n"); + pr_err("Unable to extract alua_tg_pt_write_md\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { - printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:" + pr_err("Illegal value for alua_tg_pt_write_md:" " %lu\n", tmp); return -EINVAL; } @@ -1979,7 +1979,7 @@ int core_setup_alua(struct se_device *dev, int force_pt) !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) { alua->alua_type = SPC_ALUA_PASSTHROUGH; alua->alua_state_check = &core_alua_state_check_nop; - printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" + pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA" " emulation\n", dev->transport->name); return 0; } @@ -1988,7 +1988,7 @@ int core_setup_alua(struct se_device *dev, int force_pt) * use emulated ALUA. */ if (dev->transport->get_device_rev(dev) >= SCSI_3) { - printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3" + pr_debug("%s: Enabling ALUA Emulation for SPC-3" " device\n", dev->transport->name); /* * Associate this struct se_device with the default ALUA @@ -2005,13 +2005,13 @@ int core_setup_alua(struct se_device *dev, int force_pt) default_lu_gp); spin_unlock(&lu_gp_mem->lu_gp_mem_lock); - printk(KERN_INFO "%s: Adding to default ALUA LU Group:" + pr_debug("%s: Adding to default ALUA LU Group:" " core/alua/lu_gps/default_lu_gp\n", dev->transport->name); } else { alua->alua_type = SPC2_ALUA_DISABLED; alua->alua_state_check = &core_alua_state_check_nop; - printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2" + pr_debug("%s: Disabling ALUA Emulation for SPC-2" " device\n", dev->transport->name); } diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 418282d926fa..982830023661 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -73,7 +73,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) * payload going back for EVPD=0 */ if (cmd->data_length < 6) { - printk(KERN_ERR "SCSI Inquiry payload length: %u" + pr_err("SCSI Inquiry payload length: %u" " too small for EVPD=0\n", cmd->data_length); return -EINVAL; } @@ -327,7 +327,7 @@ check_tpgi: spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); tg_pt_gp = tg_pt_gp_mem->tg_pt_gp; - if (!(tg_pt_gp)) { + if (!tg_pt_gp) { spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); goto check_lu_gp; } @@ -358,12 +358,12 @@ check_lu_gp: goto check_scsi_name; } lu_gp_mem = dev->dev_alua_lu_gp_mem; - if (!(lu_gp_mem)) + if (!lu_gp_mem) goto check_scsi_name; spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; - if (!(lu_gp)) { + if (!lu_gp) { spin_unlock(&lu_gp_mem->lu_gp_mem_lock); goto check_scsi_name; } @@ -475,14 +475,14 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) have_tp = 1; if (cmd->data_length < (0x10 + 4)) { - printk(KERN_INFO "Received data_length: %u" + pr_debug("Received data_length: %u" " too small for EVPD 0xb0\n", cmd->data_length); return -EINVAL; } if (have_tp && cmd->data_length < (0x3c + 4)) { - printk(KERN_INFO "Received data_length: %u" + pr_debug("Received data_length: %u" " too small for TPE=1 EVPD 0xb0\n", cmd->data_length); have_tp = 0; @@ -491,6 +491,9 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) buf[0] = dev->transport->get_device_type(dev); buf[3] = have_tp ? 
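/*
 * Every store handler above repeats the same strict_strtoul() parse plus
 * range check. A hypothetical helper showing the shared shape (purely
 * illustrative; the patch keeps the checks open-coded):
 */
static int core_alua_parse_bounded(const char *page, unsigned long max,
				unsigned long *val)
{
	int ret = strict_strtoul(page, 0, val);

	if (ret < 0) {
		pr_err("Unable to extract value\n");
		return -EINVAL;
	}
	if (*val > max) {
		pr_err("Value: %lu exceeds maximum: %lu\n", *val, max);
		return -EINVAL;
	}
	return 0;
}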
0x3c : 0x10; + /* Set WSNZ to 1 */ + buf[4] = 0x01; + /* * Set OPTIMAL TRANSFER LENGTH GRANULARITY */ @@ -667,7 +670,7 @@ target_emulate_inquiry(struct se_cmd *cmd) * payload length left for the next outgoing EVPD metadata */ if (cmd->data_length < 4) { - printk(KERN_ERR "SCSI Inquiry payload length: %u" + pr_err("SCSI Inquiry payload length: %u" " too small for EVPD=1\n", cmd->data_length); return -EINVAL; } @@ -685,7 +688,7 @@ target_emulate_inquiry(struct se_cmd *cmd) } transport_kunmap_first_data_page(cmd); - printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]); + pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]); return -EINVAL; } @@ -891,7 +894,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten) length += target_modesense_control(dev, &buf[offset+length]); break; default: - printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n", + pr_err("Got Unknown Mode Page: 0x%02x\n", cdb[2] & 0x3f); return PYX_TRANSPORT_UNKNOWN_MODE_PAGE; } @@ -947,14 +950,14 @@ target_emulate_request_sense(struct se_cmd *cmd) int err = 0; if (cdb[1] & 0x01) { - printk(KERN_ERR "REQUEST_SENSE description emulation not" + pr_err("REQUEST_SENSE description emulation not" " supported\n"); return PYX_TRANSPORT_INVALID_CDB_FIELD; } buf = transport_kmap_first_data_page(cmd); - if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) { + if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { /* * CURRENT ERROR, UNIT ATTENTION */ @@ -1028,18 +1031,18 @@ target_emulate_unmap(struct se_task *task) buf = transport_kmap_first_data_page(cmd); ptr = &buf[offset]; - printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" + pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); while (size) { lba = get_unaligned_be64(&ptr[0]); range = get_unaligned_be32(&ptr[8]); - printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n", + pr_debug("UNMAP: Using lba: %llu and range: %u\n", (unsigned long long)lba, range); ret = dev->transport->do_discard(dev, lba, range); if (ret < 0) { - printk(KERN_ERR "blkdev_issue_discard() failed: %d\n", + pr_err("blkdev_issue_discard() failed: %d\n", ret); goto err; } @@ -1084,12 +1087,12 @@ target_emulate_write_same(struct se_task *task, int write_same32) else range = (dev->transport->get_blocks(dev) - lba); - printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", + pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n", (unsigned long long)lba, (unsigned long long)range); ret = dev->transport->do_discard(dev, lba, range); if (ret < 0) { - printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n"); + pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n"); return ret; } @@ -1125,7 +1128,7 @@ transport_emulate_control_cdb(struct se_task *task) ret = target_emulate_readcapacity_16(cmd); break; default: - printk(KERN_ERR "Unsupported SA: 0x%02x\n", + pr_err("Unsupported SA: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } @@ -1135,7 +1138,7 @@ transport_emulate_control_cdb(struct se_task *task) break; case UNMAP: if (!dev->transport->do_discard) { - printk(KERN_ERR "UNMAP emulation not supported for: %s\n", + pr_err("UNMAP emulation not supported for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } @@ -1143,7 +1146,7 @@ transport_emulate_control_cdb(struct se_task *task) break; case WRITE_SAME_16: if (!dev->transport->do_discard) { - printk(KERN_ERR "WRITE_SAME_16 emulation not supported" + pr_err("WRITE_SAME_16 emulation not 
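/*
 * Layout assumed by the UNMAP walk above: each block descriptor is 16
 * bytes, big-endian, with the LBA at offset 0 and the block count at
 * offset 8. Condensed sketch of that loop (ptr, size, dev and ret as in
 * the hunk):
 */
while (size >= 16) {
	u64 lba = get_unaligned_be64(&ptr[0]);	 /* UNMAP LOGICAL BLOCK ADDRESS */
	u32 range = get_unaligned_be32(&ptr[8]); /* NUMBER OF LOGICAL BLOCKS */

	ret = dev->transport->do_discard(dev, lba, range);
	if (ret < 0)
		break;
	ptr += 16;
	size -= 16;
}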
supported" " for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } @@ -1155,7 +1158,7 @@ transport_emulate_control_cdb(struct se_task *task) switch (service_action) { case WRITE_SAME_32: if (!dev->transport->do_discard) { - printk(KERN_ERR "WRITE_SAME_32 SA emulation not" + pr_err("WRITE_SAME_32 SA emulation not" " supported for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; @@ -1163,7 +1166,7 @@ transport_emulate_control_cdb(struct se_task *task) ret = target_emulate_write_same(task, 1); break; default: - printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:" + pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" " 0x%02x\n", service_action); break; } @@ -1171,8 +1174,7 @@ transport_emulate_control_cdb(struct se_task *task) case SYNCHRONIZE_CACHE: case 0x91: /* SYNCHRONIZE_CACHE_16: */ if (!dev->transport->do_sync_cache) { - printk(KERN_ERR - "SYNCHRONIZE_CACHE emulation not supported" + pr_err("SYNCHRONIZE_CACHE emulation not supported" " for: %s\n", dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } @@ -1189,7 +1191,7 @@ transport_emulate_control_cdb(struct se_task *task) case WRITE_FILEMARKS: break; default: - printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n", + pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n", cmd->t_task_cdb[0], dev->transport->name); return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; } diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 6b00810b8dcb..e56c39daeec6 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -104,12 +104,12 @@ static struct target_fabric_configfs *target_core_get_fabric( { struct target_fabric_configfs *tf; - if (!(name)) + if (!name) return NULL; mutex_lock(&g_tf_lock); list_for_each_entry(tf, &g_tf_list, tf_list) { - if (!(strcmp(tf->tf_name, name))) { + if (!strcmp(tf->tf_name, name)) { atomic_inc(&tf->tf_access_cnt); mutex_unlock(&g_tf_lock); return tf; @@ -130,7 +130,7 @@ static struct config_group *target_core_register_fabric( struct target_fabric_configfs *tf; int ret; - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:" + pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:" " %s\n", group, name); /* * Ensure that TCM subsystem plugins are loaded at this point for @@ -150,7 +150,7 @@ static struct config_group *target_core_register_fabric( * registered, but simply provids auto loading logic for modules with * mkdir(2) system calls with known TCM fabric modules. 
*/ - if (!(strncmp(name, "iscsi", 5))) { + if (!strncmp(name, "iscsi", 5)) { /* * Automatically load the LIO Target fabric module when the * following is called: @@ -159,11 +159,11 @@ static struct config_group *target_core_register_fabric( */ ret = request_module("iscsi_target_mod"); if (ret < 0) { - printk(KERN_ERR "request_module() failed for" + pr_err("request_module() failed for" " iscsi_target_mod.ko: %d\n", ret); return ERR_PTR(-EINVAL); } - } else if (!(strncmp(name, "loopback", 8))) { + } else if (!strncmp(name, "loopback", 8)) { /* * Automatically load the tcm_loop fabric module when the * following is called: @@ -172,25 +172,25 @@ static struct config_group *target_core_register_fabric( */ ret = request_module("tcm_loop"); if (ret < 0) { - printk(KERN_ERR "request_module() failed for" + pr_err("request_module() failed for" " tcm_loop.ko: %d\n", ret); return ERR_PTR(-EINVAL); } } tf = target_core_get_fabric(name); - if (!(tf)) { - printk(KERN_ERR "target_core_get_fabric() failed for %s\n", + if (!tf) { + pr_err("target_core_get_fabric() failed for %s\n", name); return ERR_PTR(-EINVAL); } - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:" + pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:" " %s\n", tf->tf_name); /* * On a successful target_core_get_fabric() look, the returned * struct target_fabric_configfs *tf will contain a usage reference. */ - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", + pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", &TF_CIT_TMPL(tf)->tfc_wwn_cit); tf->tf_group.default_groups = tf->tf_default_groups; @@ -202,14 +202,14 @@ static struct config_group *target_core_register_fabric( config_group_init_type_name(&tf->tf_disc_group, "discovery_auth", &TF_CIT_TMPL(tf)->tfc_discovery_cit); - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" + pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" " %s\n", tf->tf_group.cg_item.ci_name); /* * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item() */ tf->tf_ops.tf_subsys = tf->tf_subsys; tf->tf_fabric = &tf->tf_group.cg_item; - printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" + pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric" " for %s\n", name); return &tf->tf_group; @@ -228,18 +228,18 @@ static void target_core_deregister_fabric( struct config_item *df_item; int i; - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in" " tf list\n", config_item_name(item)); - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:" " %s\n", tf->tf_name); atomic_dec(&tf->tf_access_cnt); - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing" " tf->tf_fabric for %s\n", tf->tf_name); tf->tf_fabric = NULL; - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci" " %s\n", config_item_name(item)); tf_group = &tf->tf_group; @@ -307,17 +307,17 @@ struct target_fabric_configfs *target_fabric_configfs_init( struct target_fabric_configfs *tf; if (!(name)) { - printk(KERN_ERR "Unable to locate passed fabric name\n"); + pr_err("Unable to locate passed fabric name\n"); return ERR_PTR(-EINVAL); } if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) { - printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC" 
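/*
 * The request_module() calls above implement mkdir(2)-driven autoload:
 * "mkdir /sys/kernel/config/target/iscsi" pulls in iscsi_target_mod, whose
 * module_init() registers the fabric so that the target_core_get_fabric()
 * lookup which follows can succeed. The contract, sketched:
 */
ret = request_module("iscsi_target_mod");	/* 0 on success */
if (ret < 0)
	return ERR_PTR(-EINVAL);

tf = target_core_get_fabric(name);		/* now expected to resolve */
if (!tf)
	return ERR_PTR(-EINVAL);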
+ pr_err("Passed name: %s exceeds TARGET_FABRIC" "_NAME_SIZE\n", name); return ERR_PTR(-EINVAL); } tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL); - if (!(tf)) + if (!tf) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&tf->tf_list); @@ -336,9 +336,9 @@ struct target_fabric_configfs *target_fabric_configfs_init( list_add_tail(&tf->tf_list, &g_tf_list); mutex_unlock(&g_tf_lock); - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" + pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>" ">>>>>>>>>>>>>>\n"); - printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for" + pr_debug("Initialized struct target_fabric_configfs: %p for" " %s\n", tf, tf->tf_name); return tf; } @@ -367,132 +367,132 @@ static int target_fabric_tf_ops_check( { struct target_core_fabric_ops *tfo = &tf->tf_ops; - if (!(tfo->get_fabric_name)) { - printk(KERN_ERR "Missing tfo->get_fabric_name()\n"); + if (!tfo->get_fabric_name) { + pr_err("Missing tfo->get_fabric_name()\n"); return -EINVAL; } - if (!(tfo->get_fabric_proto_ident)) { - printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n"); + if (!tfo->get_fabric_proto_ident) { + pr_err("Missing tfo->get_fabric_proto_ident()\n"); return -EINVAL; } - if (!(tfo->tpg_get_wwn)) { - printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n"); + if (!tfo->tpg_get_wwn) { + pr_err("Missing tfo->tpg_get_wwn()\n"); return -EINVAL; } - if (!(tfo->tpg_get_tag)) { - printk(KERN_ERR "Missing tfo->tpg_get_tag()\n"); + if (!tfo->tpg_get_tag) { + pr_err("Missing tfo->tpg_get_tag()\n"); return -EINVAL; } - if (!(tfo->tpg_get_default_depth)) { - printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n"); + if (!tfo->tpg_get_default_depth) { + pr_err("Missing tfo->tpg_get_default_depth()\n"); return -EINVAL; } - if (!(tfo->tpg_get_pr_transport_id)) { - printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n"); + if (!tfo->tpg_get_pr_transport_id) { + pr_err("Missing tfo->tpg_get_pr_transport_id()\n"); return -EINVAL; } - if (!(tfo->tpg_get_pr_transport_id_len)) { - printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n"); + if (!tfo->tpg_get_pr_transport_id_len) { + pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n"); return -EINVAL; } - if (!(tfo->tpg_check_demo_mode)) { - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n"); + if (!tfo->tpg_check_demo_mode) { + pr_err("Missing tfo->tpg_check_demo_mode()\n"); return -EINVAL; } - if (!(tfo->tpg_check_demo_mode_cache)) { - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n"); + if (!tfo->tpg_check_demo_mode_cache) { + pr_err("Missing tfo->tpg_check_demo_mode_cache()\n"); return -EINVAL; } - if (!(tfo->tpg_check_demo_mode_write_protect)) { - printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n"); + if (!tfo->tpg_check_demo_mode_write_protect) { + pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n"); return -EINVAL; } - if (!(tfo->tpg_check_prod_mode_write_protect)) { - printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n"); + if (!tfo->tpg_check_prod_mode_write_protect) { + pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n"); return -EINVAL; } - if (!(tfo->tpg_alloc_fabric_acl)) { - printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n"); + if (!tfo->tpg_alloc_fabric_acl) { + pr_err("Missing tfo->tpg_alloc_fabric_acl()\n"); return -EINVAL; } - if (!(tfo->tpg_release_fabric_acl)) { - printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n"); + if (!tfo->tpg_release_fabric_acl) { + pr_err("Missing tfo->tpg_release_fabric_acl()\n"); return 
-EINVAL; } - if (!(tfo->tpg_get_inst_index)) { - printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n"); + if (!tfo->tpg_get_inst_index) { + pr_err("Missing tfo->tpg_get_inst_index()\n"); return -EINVAL; } if (!tfo->release_cmd) { - printk(KERN_ERR "Missing tfo->release_cmd()\n"); + pr_err("Missing tfo->release_cmd()\n"); return -EINVAL; } - if (!(tfo->shutdown_session)) { - printk(KERN_ERR "Missing tfo->shutdown_session()\n"); + if (!tfo->shutdown_session) { + pr_err("Missing tfo->shutdown_session()\n"); return -EINVAL; } - if (!(tfo->close_session)) { - printk(KERN_ERR "Missing tfo->close_session()\n"); + if (!tfo->close_session) { + pr_err("Missing tfo->close_session()\n"); return -EINVAL; } - if (!(tfo->stop_session)) { - printk(KERN_ERR "Missing tfo->stop_session()\n"); + if (!tfo->stop_session) { + pr_err("Missing tfo->stop_session()\n"); return -EINVAL; } - if (!(tfo->fall_back_to_erl0)) { - printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n"); + if (!tfo->fall_back_to_erl0) { + pr_err("Missing tfo->fall_back_to_erl0()\n"); return -EINVAL; } - if (!(tfo->sess_logged_in)) { - printk(KERN_ERR "Missing tfo->sess_logged_in()\n"); + if (!tfo->sess_logged_in) { + pr_err("Missing tfo->sess_logged_in()\n"); return -EINVAL; } - if (!(tfo->sess_get_index)) { - printk(KERN_ERR "Missing tfo->sess_get_index()\n"); + if (!tfo->sess_get_index) { + pr_err("Missing tfo->sess_get_index()\n"); return -EINVAL; } - if (!(tfo->write_pending)) { - printk(KERN_ERR "Missing tfo->write_pending()\n"); + if (!tfo->write_pending) { + pr_err("Missing tfo->write_pending()\n"); return -EINVAL; } - if (!(tfo->write_pending_status)) { - printk(KERN_ERR "Missing tfo->write_pending_status()\n"); + if (!tfo->write_pending_status) { + pr_err("Missing tfo->write_pending_status()\n"); return -EINVAL; } - if (!(tfo->set_default_node_attributes)) { - printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n"); + if (!tfo->set_default_node_attributes) { + pr_err("Missing tfo->set_default_node_attributes()\n"); return -EINVAL; } - if (!(tfo->get_task_tag)) { - printk(KERN_ERR "Missing tfo->get_task_tag()\n"); + if (!tfo->get_task_tag) { + pr_err("Missing tfo->get_task_tag()\n"); return -EINVAL; } - if (!(tfo->get_cmd_state)) { - printk(KERN_ERR "Missing tfo->get_cmd_state()\n"); + if (!tfo->get_cmd_state) { + pr_err("Missing tfo->get_cmd_state()\n"); return -EINVAL; } - if (!(tfo->queue_data_in)) { - printk(KERN_ERR "Missing tfo->queue_data_in()\n"); + if (!tfo->queue_data_in) { + pr_err("Missing tfo->queue_data_in()\n"); return -EINVAL; } - if (!(tfo->queue_status)) { - printk(KERN_ERR "Missing tfo->queue_status()\n"); + if (!tfo->queue_status) { + pr_err("Missing tfo->queue_status()\n"); return -EINVAL; } - if (!(tfo->queue_tm_rsp)) { - printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n"); + if (!tfo->queue_tm_rsp) { + pr_err("Missing tfo->queue_tm_rsp()\n"); return -EINVAL; } - if (!(tfo->set_fabric_sense_len)) { - printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n"); + if (!tfo->set_fabric_sense_len) { + pr_err("Missing tfo->set_fabric_sense_len()\n"); return -EINVAL; } - if (!(tfo->get_fabric_sense_len)) { - printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n"); + if (!tfo->get_fabric_sense_len) { + pr_err("Missing tfo->get_fabric_sense_len()\n"); return -EINVAL; } - if (!(tfo->is_state_remove)) { - printk(KERN_ERR "Missing tfo->is_state_remove()\n"); + if (!tfo->is_state_remove) { + pr_err("Missing tfo->is_state_remove()\n"); return -EINVAL; } /* @@ -500,20 +500,20 @@ static int target_fabric_tf_ops_check( * 
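/*
 * target_fabric_tf_ops_check() above open-codes roughly thirty identical
 * presence checks. A hypothetical macro that would collapse them (not
 * part of the patch, shown only to make the pattern explicit):
 */
#define TFO_CHECK(tfo, member)						\
do {									\
	if (!(tfo)->member) {						\
		pr_err("Missing tfo->" #member "()\n");			\
		return -EINVAL;						\
	}								\
} while (0)

/* usage: */
TFO_CHECK(tfo, get_fabric_name);
TFO_CHECK(tfo, queue_data_in);
TFO_CHECK(tfo, queue_status);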
tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in * target_core_fabric_configfs.c WWN+TPG group context code. */ - if (!(tfo->fabric_make_wwn)) { - printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n"); + if (!tfo->fabric_make_wwn) { + pr_err("Missing tfo->fabric_make_wwn()\n"); return -EINVAL; } - if (!(tfo->fabric_drop_wwn)) { - printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n"); + if (!tfo->fabric_drop_wwn) { + pr_err("Missing tfo->fabric_drop_wwn()\n"); return -EINVAL; } - if (!(tfo->fabric_make_tpg)) { - printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n"); + if (!tfo->fabric_make_tpg) { + pr_err("Missing tfo->fabric_make_tpg()\n"); return -EINVAL; } - if (!(tfo->fabric_drop_tpg)) { - printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n"); + if (!tfo->fabric_drop_tpg) { + pr_err("Missing tfo->fabric_drop_tpg()\n"); return -EINVAL; } @@ -533,13 +533,13 @@ int target_fabric_configfs_register( { int ret; - if (!(tf)) { - printk(KERN_ERR "Unable to locate target_fabric_configfs" + if (!tf) { + pr_err("Unable to locate target_fabric_configfs" " pointer\n"); return -EINVAL; } - if (!(tf->tf_subsys)) { - printk(KERN_ERR "Unable to target struct config_subsystem" + if (!tf->tf_subsys) { + pr_err("Unable to target struct config_subsystem" " pointer\n"); return -EINVAL; } @@ -547,7 +547,7 @@ int target_fabric_configfs_register( if (ret < 0) return ret; - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" + pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>" ">>>>>>>>>>\n"); return 0; } @@ -558,36 +558,36 @@ void target_fabric_configfs_deregister( { struct configfs_subsystem *su; - if (!(tf)) { - printk(KERN_ERR "Unable to locate passed target_fabric_" + if (!tf) { + pr_err("Unable to locate passed target_fabric_" "configfs\n"); return; } su = tf->tf_subsys; - if (!(su)) { - printk(KERN_ERR "Unable to locate passed tf->tf_subsys" + if (!su) { + pr_err("Unable to locate passed tf->tf_subsys" " pointer\n"); return; } - printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" + pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>" ">>>>>>>>>>>>\n"); mutex_lock(&g_tf_lock); if (atomic_read(&tf->tf_access_cnt)) { mutex_unlock(&g_tf_lock); - printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n", + pr_err("Non zero tf->tf_access_cnt for fabric %s\n", tf->tf_name); BUG(); } list_del(&tf->tf_list); mutex_unlock(&g_tf_lock); - printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" + pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:" " %s\n", tf->tf_name); tf->tf_module = NULL; tf->tf_subsys = NULL; kfree(tf); - printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" + pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>" ">>>>>\n"); } EXPORT_SYMBOL(target_fabric_configfs_deregister); @@ -609,7 +609,7 @@ static ssize_t target_core_dev_show_attr_##_name( \ \ spin_lock(&se_dev->se_dev_lock); \ dev = se_dev->se_dev_ptr; \ - if (!(dev)) { \ + if (!dev) { \ spin_unlock(&se_dev->se_dev_lock); \ return -ENODEV; \ } \ @@ -633,14 +633,14 @@ static ssize_t target_core_dev_store_attr_##_name( \ \ spin_lock(&se_dev->se_dev_lock); \ dev = se_dev->se_dev_ptr; \ - if (!(dev)) { \ + if (!dev) { \ spin_unlock(&se_dev->se_dev_lock); \ return -ENODEV; \ } \ ret = strict_strtoul(page, 0, &val); \ if (ret < 0) { \ spin_unlock(&se_dev->se_dev_lock); \ - printk(KERN_ERR "strict_strtoul() failed with" \ + pr_err("strict_strtoul() failed with" \ " ret: %d\n", ret); \ return -EINVAL; \ } \ @@ -806,7 +806,7 @@ static ssize_t 
target_core_dev_wwn_show_attr_vpd_unit_serial( struct se_device *dev; dev = se_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; return sprintf(page, "T10 VPD Unit Serial Number: %s\n", @@ -833,13 +833,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( * VPD Unit Serial Number that OS dependent multipath can depend on. */ if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) { - printk(KERN_ERR "Underlying SCSI device firmware provided VPD" + pr_err("Underlying SCSI device firmware provided VPD" " Unit Serial, ignoring request\n"); return -EOPNOTSUPP; } if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) { - printk(KERN_ERR "Emulated VPD Unit Serial exceeds" + pr_err("Emulated VPD Unit Serial exceeds" " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN); return -EOVERFLOW; } @@ -850,9 +850,9 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( * could cause negative effects. */ dev = su_dev->se_dev_ptr; - if ((dev)) { + if (dev) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "Unable to set VPD Unit Serial while" + pr_err("Unable to set VPD Unit Serial while" " active %d $FABRIC_MOD exports exist\n", atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; @@ -870,7 +870,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial( "%s", strstrip(buf)); su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL; - printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:" + pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:" " %s\n", su_dev->t10_wwn.unit_serial); return count; @@ -892,19 +892,19 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier( ssize_t len = 0; dev = se_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; memset(buf, 0, VPD_TMP_BUF_SIZE); spin_lock(&t10_wwn->t10_vpd_lock); list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { - if (!(vpd->protocol_identifier_set)) + if (!vpd->protocol_identifier_set) continue; transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE); - if ((len + strlen(buf) >= PAGE_SIZE)) + if (len + strlen(buf) >= PAGE_SIZE) break; len += sprintf(page+len, "%s", buf); @@ -939,7 +939,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ ssize_t len = 0; \ \ dev = se_dev->se_dev_ptr; \ - if (!(dev)) \ + if (!dev) \ return -ENODEV; \ \ spin_lock(&t10_wwn->t10_vpd_lock); \ @@ -949,19 +949,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \ - if ((len + strlen(buf) >= PAGE_SIZE)) \ + if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \ - if ((len + strlen(buf) >= PAGE_SIZE)) \ + if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ \ memset(buf, 0, VPD_TMP_BUF_SIZE); \ transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \ - if ((len + strlen(buf) >= PAGE_SIZE)) \ + if (len + strlen(buf) >= PAGE_SIZE) \ break; \ len += sprintf(page+len, "%s", buf); \ } \ @@ -1070,7 +1070,7 @@ static ssize_t target_core_dev_pr_show_spc3_res( spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; - if (!(pr_reg)) { + if (!pr_reg) { *len += sprintf(page + *len, "No SPC-3 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return *len; @@ -1096,7 +1096,7 @@ static ssize_t target_core_dev_pr_show_spc2_res( spin_lock(&dev->dev_reservation_lock); 
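/*
 * The target_core_dev_show_attr_##_name / _store_attr_##_name bodies a
 * little above are macro-generated, one pair per device attribute.
 * Roughly what one show expansion looks like (parameter and field names
 * per the surrounding hunks; treat as an illustrative sketch):
 */
static ssize_t target_core_dev_show_attr_block_size(
	struct se_dev_attrib *da, char *page)
{
	struct se_subsystem_dev *se_dev = da->da_sub_dev;
	struct se_device *dev;
	ssize_t rb;

	spin_lock(&se_dev->se_dev_lock);
	dev = se_dev->se_dev_ptr;
	if (!dev) {
		spin_unlock(&se_dev->se_dev_lock);
		return -ENODEV;
	}
	rb = snprintf(page, PAGE_SIZE, "%u\n",
			dev->se_sub_dev->se_dev_attrib.block_size);
	spin_unlock(&se_dev->se_dev_lock);
	return rb;
}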
se_nacl = dev->dev_reserved_node_acl; - if (!(se_nacl)) { + if (!se_nacl) { *len += sprintf(page + *len, "No SPC-2 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return *len; @@ -1115,7 +1115,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder( { ssize_t len = 0; - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; switch (su_dev->t10_pr.res_type) { @@ -1152,7 +1152,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( ssize_t len = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) @@ -1160,7 +1160,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts( spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; - if (!(pr_reg)) { + if (!pr_reg) { len = sprintf(page, "No SPC-3 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return len; @@ -1189,7 +1189,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation( struct se_subsystem_dev *su_dev, char *page) { - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) @@ -1216,7 +1216,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( ssize_t len = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) @@ -1224,7 +1224,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port( spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; - if (!(pr_reg)) { + if (!pr_reg) { len = sprintf(page, "No SPC-3 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return len; @@ -1263,7 +1263,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( ssize_t len = 0; int reg_count = 0, prf_isid; - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) @@ -1286,7 +1286,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( &i_buf[0] : "", pr_reg->pr_res_key, pr_reg->pr_res_generation); - if ((len + strlen(buf) >= PAGE_SIZE)) + if (len + strlen(buf) >= PAGE_SIZE) break; len += sprintf(page+len, "%s", buf); @@ -1294,7 +1294,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts( } spin_unlock(&su_dev->t10_pr.registration_lock); - if (!(reg_count)) + if (!reg_count) len += sprintf(page+len, "None\n"); return len; @@ -1314,7 +1314,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type( ssize_t len = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) @@ -1322,7 +1322,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type( spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; - if (!(pr_reg)) { + if (!pr_reg) { len = sprintf(page, "No SPC-3 Reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); return len; @@ -1345,7 +1345,7 @@ static ssize_t target_core_dev_pr_show_attr_res_type( { ssize_t len = 0; - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; switch (su_dev->t10_pr.res_type) { @@ -1376,7 +1376,7 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active( struct se_subsystem_dev *su_dev, char *page) { - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) @@ -1395,7 +1395,7 @@ static ssize_t 
target_core_dev_pr_show_attr_res_aptpl_metadata( struct se_subsystem_dev *su_dev, char *page) { - if (!(su_dev->se_dev_ptr)) + if (!su_dev->se_dev_ptr) return -ENODEV; if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) @@ -1447,14 +1447,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( u8 type = 0, scope; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS) return 0; if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_INFO "Unable to process APTPL metadata while" + pr_debug("Unable to process APTPL metadata while" " active fabric exports exist\n"); return -EINVAL; } @@ -1484,7 +1484,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( goto out; } if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) { - printk(KERN_ERR "APTPL metadata initiator_node=" + pr_err("APTPL metadata initiator_node=" " exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", PR_APTPL_MAX_IPORT_LEN); ret = -EINVAL; @@ -1498,7 +1498,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( goto out; } if (strlen(isid) >= PR_REG_ISID_LEN) { - printk(KERN_ERR "APTPL metadata initiator_isid" + pr_err("APTPL metadata initiator_isid" "= exceeds PR_REG_ISID_LEN: %d\n", PR_REG_ISID_LEN); ret = -EINVAL; @@ -1513,7 +1513,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( } ret = strict_strtoull(arg_p, 0, &tmp_ll); if (ret < 0) { - printk(KERN_ERR "strict_strtoull() failed for" + pr_err("strict_strtoull() failed for" " sa_res_key=\n"); goto out; } @@ -1559,7 +1559,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( goto out; } if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) { - printk(KERN_ERR "APTPL metadata target_node=" + pr_err("APTPL metadata target_node=" " exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", PR_APTPL_MAX_TPORT_LEN); ret = -EINVAL; @@ -1583,14 +1583,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( } } - if (!(i_port) || !(t_port) || !(sa_res_key)) { - printk(KERN_ERR "Illegal parameters for APTPL registration\n"); + if (!i_port || !t_port || !sa_res_key) { + pr_err("Illegal parameters for APTPL registration\n"); ret = -EINVAL; goto out; } if (res_holder && !(type)) { - printk(KERN_ERR "Illegal PR type: 0x%02x for reservation" + pr_err("Illegal PR type: 0x%02x for reservation" " holder\n", type); ret = -EINVAL; goto out; @@ -1649,7 +1649,7 @@ static ssize_t target_core_show_dev_info(void *p, char *page) int bl = 0; ssize_t read_bytes = 0; - if (!(se_dev->se_dev_ptr)) + if (!se_dev->se_dev_ptr) return -ENODEV; transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl); @@ -1675,8 +1675,8 @@ static ssize_t target_core_store_dev_control( struct se_hba *hba = se_dev->se_dev_hba; struct se_subsystem_api *t = hba->transport; - if (!(se_dev->se_dev_su_ptr)) { - printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se" + if (!se_dev->se_dev_su_ptr) { + pr_err("Unable to locate struct se_subsystem_dev>se" "_dev_su_ptr\n"); return -EINVAL; } @@ -1712,7 +1712,7 @@ static ssize_t target_core_store_dev_alias( ssize_t read_bytes; if (count > (SE_DEV_ALIAS_LEN-1)) { - printk(KERN_ERR "alias count: %d exceeds" + pr_err("alias count: %d exceeds" " SE_DEV_ALIAS_LEN-1: %u\n", (int)count, SE_DEV_ALIAS_LEN-1); return -EINVAL; @@ -1722,7 +1722,7 @@ static ssize_t target_core_store_dev_alias( read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page); - printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n", + 
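/*
 * The APTPL store above validates each parsed token against a fixed
 * maximum (PR_APTPL_MAX_IPORT_LEN, PR_REG_ISID_LEN,
 * PR_APTPL_MAX_TPORT_LEN). A hypothetical helper capturing that repeated
 * check (illustrative only):
 */
static int aptpl_check_len(const char *what, const char *s, size_t max)
{
	if (strlen(s) >= max) {
		pr_err("APTPL metadata %s= exceeds %zu\n", what, max);
		return -EINVAL;
	}
	return 0;
}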
pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&se_dev->se_dev_group.cg_item), se_dev->se_dev_alias); @@ -1758,7 +1758,7 @@ static ssize_t target_core_store_dev_udev_path( ssize_t read_bytes; if (count > (SE_UDEV_PATH_LEN-1)) { - printk(KERN_ERR "udev_path count: %d exceeds" + pr_err("udev_path count: %d exceeds" " SE_UDEV_PATH_LEN-1: %u\n", (int)count, SE_UDEV_PATH_LEN-1); return -EINVAL; @@ -1768,7 +1768,7 @@ static ssize_t target_core_store_dev_udev_path( read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN, "%s", page); - printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n", + pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&se_dev->se_dev_group.cg_item), se_dev->se_dev_udev_path); @@ -1796,13 +1796,13 @@ static ssize_t target_core_store_dev_enable( char *ptr; ptr = strstr(page, "1"); - if (!(ptr)) { - printk(KERN_ERR "For dev_enable ops, only valid value" + if (!ptr) { + pr_err("For dev_enable ops, only valid value" " is \"1\"\n"); return -EINVAL; } - if ((se_dev->se_dev_ptr)) { - printk(KERN_ERR "se_dev->se_dev_ptr already set for storage" + if (se_dev->se_dev_ptr) { + pr_err("se_dev->se_dev_ptr already set for storage" " object\n"); return -EEXIST; } @@ -1817,7 +1817,7 @@ static ssize_t target_core_store_dev_enable( return -EINVAL; se_dev->se_dev_ptr = dev; - printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" + pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:" " %p\n", se_dev->se_dev_ptr); return count; @@ -1841,22 +1841,22 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page) ssize_t len = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) return len; lu_gp_mem = dev->dev_alua_lu_gp_mem; - if (!(lu_gp_mem)) { - printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" + if (!lu_gp_mem) { + pr_err("NULL struct se_device->dev_alua_lu_gp_mem" " pointer\n"); return -EINVAL; } spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; - if ((lu_gp)) { + if (lu_gp) { lu_ci = &lu_gp->lu_gp_group.cg_item; len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", config_item_name(lu_ci), lu_gp->lu_gp_id); @@ -1880,17 +1880,17 @@ static ssize_t target_core_store_alua_lu_gp( int move = 0; dev = su_dev->se_dev_ptr; - if (!(dev)) + if (!dev) return -ENODEV; if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) { - printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n", + pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n", config_item_name(&hba->hba_group.cg_item), config_item_name(&su_dev->se_dev_group.cg_item)); return -EINVAL; } if (count > LU_GROUP_NAME_BUF) { - printk(KERN_ERR "ALUA LU Group Alias too large!\n"); + pr_err("ALUA LU Group Alias too large!\n"); return -EINVAL; } memset(buf, 0, LU_GROUP_NAME_BUF); @@ -1906,27 +1906,27 @@ static ssize_t target_core_store_alua_lu_gp( * core_alua_get_lu_gp_by_name below(). 
*/ lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf)); - if (!(lu_gp_new)) + if (!lu_gp_new) return -ENODEV; } lu_gp_mem = dev->dev_alua_lu_gp_mem; - if (!(lu_gp_mem)) { + if (!lu_gp_mem) { if (lu_gp_new) core_alua_put_lu_gp_from_name(lu_gp_new); - printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem" + pr_err("NULL struct se_device->dev_alua_lu_gp_mem" " pointer\n"); return -EINVAL; } spin_lock(&lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; - if ((lu_gp)) { + if (lu_gp) { /* * Clearing an existing lu_gp association, and replacing * with NULL */ - if (!(lu_gp_new)) { - printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s" + if (!lu_gp_new) { + pr_debug("Target_Core_ConfigFS: Releasing %s/%s" " from ALUA LU Group: core/alua/lu_gps/%s, ID:" " %hu\n", config_item_name(&hba->hba_group.cg_item), @@ -1951,7 +1951,7 @@ static ssize_t target_core_store_alua_lu_gp( __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); spin_unlock(&lu_gp_mem->lu_gp_mem_lock); - printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" + pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:" " core/alua/lu_gps/%s, ID: %hu\n", (move) ? "Moving" : "Adding", config_item_name(&hba->hba_group.cg_item), @@ -1995,7 +1995,7 @@ static void target_core_dev_release(struct config_item *item) *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` */ if (se_dev->se_dev_ptr) { - printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" + pr_debug("Target_Core_ConfigFS: Calling se_free_" "virtual_device() for se_dev_ptr: %p\n", se_dev->se_dev_ptr); @@ -2004,14 +2004,14 @@ static void target_core_dev_release(struct config_item *item) /* * Release struct se_subsystem_dev->se_dev_su_ptr.. */ - printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" + pr_debug("Target_Core_ConfigFS: Calling t->free_" "device() for se_dev_su_ptr: %p\n", se_dev->se_dev_su_ptr); t->free_device(se_dev->se_dev_su_ptr); } - printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" + pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem" "_dev_t: %p\n", se_dev); kfree(se_dev); } @@ -2026,7 +2026,7 @@ static ssize_t target_core_dev_show(struct config_item *item, struct target_core_configfs_attribute *tc_attr = container_of( attr, struct target_core_configfs_attribute, attr); - if (!(tc_attr->show)) + if (!tc_attr->show) return -EINVAL; return tc_attr->show(se_dev, page); @@ -2042,7 +2042,7 @@ static ssize_t target_core_dev_store(struct config_item *item, struct target_core_configfs_attribute *tc_attr = container_of( attr, struct target_core_configfs_attribute, attr); - if (!(tc_attr->store)) + if (!tc_attr->store) return -EINVAL; return tc_attr->store(se_dev, page, count); @@ -2085,7 +2085,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id( struct t10_alua_lu_gp *lu_gp, char *page) { - if (!(lu_gp->lu_gp_valid_id)) + if (!lu_gp->lu_gp_valid_id) return 0; return sprintf(page, "%hu\n", lu_gp->lu_gp_id); @@ -2102,12 +2102,12 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( ret = strict_strtoul(page, 0, &lu_gp_id); if (ret < 0) { - printk(KERN_ERR "strict_strtoul() returned %d for" + pr_err("strict_strtoul() returned %d for" " lu_gp_id\n", ret); return -EINVAL; } if (lu_gp_id > 0x0000ffff) { - printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:" + pr_err("ALUA lu_gp_id: %lu exceeds maximum:" " 0x0000ffff\n", lu_gp_id); return -EINVAL; } @@ -2116,7 +2116,7 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id( if (ret < 0) return -EINVAL; - printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA 
Logical Unit" + pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit" " Group: core/alua/lu_gps/%s to ID: %hu\n", config_item_name(&alua_lu_gp_cg->cg_item), lu_gp->lu_gp_id); @@ -2154,7 +2154,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_members( cur_len++; /* Extra byte for NULL terminator */ if ((cur_len + len) > PAGE_SIZE) { - printk(KERN_WARNING "Ran out of lu_gp_show_attr" + pr_warn("Ran out of lu_gp_show_attr" "_members buffer\n"); break; } @@ -2218,7 +2218,7 @@ static struct config_group *target_core_alua_create_lu_gp( config_group_init_type_name(alua_lu_gp_cg, name, &target_core_alua_lu_gp_cit); - printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit" + pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit" " Group: core/alua/lu_gps/%s\n", config_item_name(alua_lu_gp_ci)); @@ -2233,7 +2233,7 @@ static void target_core_alua_drop_lu_gp( struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), struct t10_alua_lu_gp, lu_gp_group); - printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" + pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit" " Group: core/alua/lu_gps/%s, ID: %hu\n", config_item_name(item), lu_gp->lu_gp_id); /* @@ -2292,22 +2292,22 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state( unsigned long tmp; int new_state, ret; - if (!(tg_pt_gp->tg_pt_gp_valid_id)) { - printk(KERN_ERR "Unable to do implict ALUA on non valid" + if (!tg_pt_gp->tg_pt_gp_valid_id) { + pr_err("Unable to do implict ALUA on non valid" " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; } ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk("Unable to extract new ALUA access state from" + pr_err("Unable to extract new ALUA access state from" " %s\n", page); return -EINVAL; } new_state = (int)tmp; if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) { - printk(KERN_ERR "Unable to process implict configfs ALUA" + pr_err("Unable to process implict configfs ALUA" " transition while TPGS_IMPLICT_ALUA is diabled\n"); return -EINVAL; } @@ -2338,8 +2338,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( unsigned long tmp; int new_status, ret; - if (!(tg_pt_gp->tg_pt_gp_valid_id)) { - printk(KERN_ERR "Unable to do set ALUA access status on non" + if (!tg_pt_gp->tg_pt_gp_valid_id) { + pr_err("Unable to do set ALUA access status on non" " valid tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return -EINVAL; @@ -2347,7 +2347,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract new ALUA access status" + pr_err("Unable to extract new ALUA access status" " from %s\n", page); return -EINVAL; } @@ -2356,7 +2356,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status( if ((new_status != ALUA_STATUS_NONE) && (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) && (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) { - printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n", + pr_err("Illegal ALUA access status: 0x%02x\n", new_status); return -EINVAL; } @@ -2407,12 +2407,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata( ret = strict_strtoul(page, 0, &tmp); if (ret < 0) { - printk(KERN_ERR "Unable to extract alua_write_metadata\n"); + pr_err("Unable to extract alua_write_metadata\n"); return -EINVAL; } if ((tmp != 0) && (tmp != 1)) { - printk(KERN_ERR "Illegal value for alua_write_metadata:" + pr_err("Illegal value for 
alua_write_metadata:" " %lu\n", tmp); return -EINVAL; } @@ -2494,7 +2494,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id( struct t10_alua_tg_pt_gp *tg_pt_gp, char *page) { - if (!(tg_pt_gp->tg_pt_gp_valid_id)) + if (!tg_pt_gp->tg_pt_gp_valid_id) return 0; return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id); @@ -2511,12 +2511,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( ret = strict_strtoul(page, 0, &tg_pt_gp_id); if (ret < 0) { - printk(KERN_ERR "strict_strtoul() returned %d for" + pr_err("strict_strtoul() returned %d for" " tg_pt_gp_id\n", ret); return -EINVAL; } if (tg_pt_gp_id > 0x0000ffff) { - printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:" + pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:" " 0x0000ffff\n", tg_pt_gp_id); return -EINVAL; } @@ -2525,7 +2525,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id( if (ret < 0) return -EINVAL; - printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: " + pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: " "core/alua/tg_pt_gps/%s to ID: %hu\n", config_item_name(&alua_tg_pt_gp_cg->cg_item), tg_pt_gp->tg_pt_gp_id); @@ -2566,7 +2566,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members( cur_len++; /* Extra byte for NULL terminator */ if ((cur_len + len) > PAGE_SIZE) { - printk(KERN_WARNING "Ran out of lu_gp_show_attr" + pr_warn("Ran out of lu_gp_show_attr" "_members buffer\n"); break; } @@ -2632,7 +2632,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp( struct config_item *alua_tg_pt_gp_ci = NULL; tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0); - if (!(tg_pt_gp)) + if (!tg_pt_gp) return NULL; alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group; @@ -2641,7 +2641,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp( config_group_init_type_name(alua_tg_pt_gp_cg, name, &target_core_alua_tg_pt_gp_cit); - printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port" + pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port" " Group: alua/tg_pt_gps/%s\n", config_item_name(alua_tg_pt_gp_ci)); @@ -2655,7 +2655,7 @@ static void target_core_alua_drop_tg_pt_gp( struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), struct t10_alua_tg_pt_gp, tg_pt_gp_group); - printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" + pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port" " Group: alua/tg_pt_gps/%s, ID: %hu\n", config_item_name(item), tg_pt_gp->tg_pt_gp_id); /* @@ -2746,7 +2746,7 @@ static struct config_group *target_core_make_subdev( se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); if (!se_dev) { - printk(KERN_ERR "Unable to allocate memory for" + pr_err("Unable to allocate memory for" " struct se_subsystem_dev\n"); goto unlock; } @@ -2770,7 +2770,7 @@ static struct config_group *target_core_make_subdev( dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7, GFP_KERNEL); - if (!(dev_cg->default_groups)) + if (!dev_cg->default_groups) goto out; /* * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr @@ -2781,8 +2781,8 @@ static struct config_group *target_core_make_subdev( * configfs tree for device object's struct config_group. 
*/ se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name); - if (!(se_dev->se_dev_su_ptr)) { - printk(KERN_ERR "Unable to locate subsystem dependent pointer" + if (!se_dev->se_dev_su_ptr) { + pr_err("Unable to locate subsystem dependent pointer" " from allocate_virtdevice()\n"); goto out; } @@ -2813,14 +2813,14 @@ static struct config_group *target_core_make_subdev( * Add core/$HBA/$DEV/alua/default_tg_pt_gp */ tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1); - if (!(tg_pt_gp)) + if (!tg_pt_gp) goto out; tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group; tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(tg_pt_gp_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->" + if (!tg_pt_gp_cg->default_groups) { + pr_err("Unable to allocate tg_pt_gp_cg->" "default_groups\n"); goto out; } @@ -2837,12 +2837,12 @@ static struct config_group *target_core_make_subdev( dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4, GFP_KERNEL); if (!dev_stat_grp->default_groups) { - printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n"); + pr_err("Unable to allocate dev_stat_grp->default_groups\n"); goto out; } target_stat_setup_dev_default_groups(se_dev); - printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" + pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:" " %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr); mutex_unlock(&hba->hba_access_mutex); @@ -2975,13 +2975,13 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba, ret = strict_strtoul(page, 0, &mode_flag); if (ret < 0) { - printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret); + pr_err("Unable to extract hba mode flag: %d\n", ret); return -EINVAL; } spin_lock(&hba->device_lock); - if (!(list_empty(&hba->hba_dev_list))) { - printk(KERN_ERR "Unable to set hba_mode with active devices\n"); + if (!list_empty(&hba->hba_dev_list)) { + pr_err("Unable to set hba_mode with active devices\n"); spin_unlock(&hba->device_lock); return -EINVAL; } @@ -3040,7 +3040,7 @@ static struct config_group *target_core_call_addhbatotarget( memset(buf, 0, TARGET_CORE_NAME_MAX_LEN); if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) { - printk(KERN_ERR "Passed *name strlen(): %d exceeds" + pr_err("Passed *name strlen(): %d exceeds" " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name), TARGET_CORE_NAME_MAX_LEN); return ERR_PTR(-ENAMETOOLONG); @@ -3048,8 +3048,8 @@ static struct config_group *target_core_call_addhbatotarget( snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name); str = strstr(buf, "_"); - if (!(str)) { - printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); + if (!str) { + pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); return ERR_PTR(-EINVAL); } se_plugin_str = buf; @@ -3058,7 +3058,7 @@ static struct config_group *target_core_call_addhbatotarget( * Namely rd_direct and rd_mcp.. 
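/*
 * The default_groups arrays allocated above are NULL-terminated arrays of
 * struct config_group pointers consumed by configfs; note the kzalloc()
 * sizes use sizeof(struct config_group) even though pointers are stored,
 * a harmless over-allocation. Wiring sketch for the two-slot case:
 */
tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group *) * 2,
				GFP_KERNEL);
if (!tg_pt_gp_cg->default_groups)
	goto out;

tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;	/* terminator */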
*/ str2 = strstr(str+1, "_"); - if ((str2)) { + if (str2) { *str2 = '\0'; /* Terminate for *se_plugin_str */ str2++; /* Skip to start of plugin dependent ID */ str = str2; @@ -3069,7 +3069,7 @@ static struct config_group *target_core_call_addhbatotarget( ret = strict_strtoul(str, 0, &plugin_dep_id); if (ret < 0) { - printk(KERN_ERR "strict_strtoul() returned %d for" + pr_err("strict_strtoul() returned %d for" " plugin_dep_id\n", ret); return ERR_PTR(-EINVAL); } @@ -3122,7 +3122,7 @@ static int __init target_core_init_configfs(void) struct t10_alua_lu_gp *lu_gp; int ret; - printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage" + pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" " Engine: %s on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine); @@ -3142,8 +3142,8 @@ static int __init target_core_init_configfs(void) target_cg = &subsys->su_group; target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(target_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate target_cg->default_groups\n"); + if (!target_cg->default_groups) { + pr_err("Unable to allocate target_cg->default_groups\n"); goto out_global; } @@ -3157,8 +3157,8 @@ static int __init target_core_init_configfs(void) hba_cg = &target_core_hbagroup; hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(hba_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n"); + if (!hba_cg->default_groups) { + pr_err("Unable to allocate hba_cg->default_groups\n"); goto out_global; } config_group_init_type_name(&alua_group, @@ -3172,8 +3172,8 @@ static int __init target_core_init_configfs(void) alua_cg = &alua_group; alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(alua_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n"); + if (!alua_cg->default_groups) { + pr_err("Unable to allocate alua_cg->default_groups\n"); goto out_global; } @@ -3191,8 +3191,8 @@ static int __init target_core_init_configfs(void) lu_gp_cg = &alua_lu_gps_group; lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); - if (!(lu_gp_cg->default_groups)) { - printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n"); + if (!lu_gp_cg->default_groups) { + pr_err("Unable to allocate lu_gp_cg->default_groups\n"); goto out_global; } @@ -3206,11 +3206,11 @@ static int __init target_core_init_configfs(void) */ ret = configfs_register_subsystem(subsys); if (ret < 0) { - printk(KERN_ERR "Error %d while registering subsystem %s\n", + pr_err("Error %d while registering subsystem %s\n", ret, subsys->su_group.cg_item.ci_namebuf); goto out_global; } - printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric" + pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric" " Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s" " on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine); /* @@ -3290,7 +3290,7 @@ static void __exit target_core_exit_configfs(void) core_alua_free_lu_gp(default_lu_gp); default_lu_gp = NULL; - printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" + pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric" " Infrastructure\n"); core_dev_release_virtual_lun0(); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 1185c3b76d47..81860ddc7cc4 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -84,7 +84,7 @@ int 
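/*
 * target_core_call_addhbatotarget() above splits an HBA directory name of
 * the form $SUBSYSTEM_PLUGIN_$HOST_ID. Worked example for "rd_mcp_3"
 * (sketch of the pointer dance):
 */
snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);	/* "rd_mcp_3" */
str = strstr(buf, "_");		/* first '_'  -> "_mcp_3" */
str2 = strstr(str + 1, "_");	/* second '_' -> "_3"; only rd_mcp and */
if (str2) {			/* rd_direct need this second probe */
	*str2 = '\0';		/* buf is now "rd_mcp" */
	str = str2;
}
str++;				/* "3", parsed into plugin_dep_id */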
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" + pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN" " Access for 0x%08x\n", se_cmd->se_tfo->get_fabric_name(), unpacked_lun); @@ -117,7 +117,7 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) if (unpacked_lun != 0) { se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN; se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; - printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" + pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", se_cmd->se_tfo->get_fabric_name(), unpacked_lun); @@ -204,7 +204,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun) spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags); if (!se_lun) { - printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" + pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08x\n", se_cmd->se_tfo->get_fabric_name(), unpacked_lun); @@ -255,15 +255,15 @@ struct se_dev_entry *core_get_se_deve_from_rtpi( continue; lun = deve->se_lun; - if (!(lun)) { - printk(KERN_ERR "%s device entries device pointer is" + if (!lun) { + pr_err("%s device entries device pointer is" " NULL, but Initiator has access.\n", tpg->se_tpg_tfo->get_fabric_name()); continue; } port = lun->lun_sep; - if (!(port)) { - printk(KERN_ERR "%s device entries device pointer is" + if (!port) { + pr_err("%s device entries device pointer is" " NULL, but Initiator has access.\n", tpg->se_tpg_tfo->get_fabric_name()); continue; @@ -301,7 +301,7 @@ int core_free_device_list_for_node( continue; if (!deve->se_lun) { - printk(KERN_ERR "%s device entries device pointer is" + pr_err("%s device entries device pointer is" " NULL, but Initiator has access.\n", tpg->se_tpg_tfo->get_fabric_name()); continue; @@ -372,7 +372,7 @@ int core_update_device_list_for_node( * struct se_dev_entry pointers below as logic in * core_alua_do_transition_tg_pt() depends on these being present. 
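/*
 * Note the two printk()s converted above had no KERN_ level at all and so
 * logged at the default loglevel; moving to pr_err() makes the severity
 * explicit as well as shortening the call. The shared failure shape in
 * these LUN lookups:
 */
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN Access for 0x%08x\n",
	se_cmd->se_tfo->get_fabric_name(), unpacked_lun);
return -ENODEV;	/* illustrative; the exact errno differs per path */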
*/ - if (!(enable)) { + if (!enable) { /* * deve->se_lun_acl will be NULL for demo-mode created LUNs * that have not been explicitly concerted to MappedLUNs -> @@ -395,14 +395,14 @@ int core_update_device_list_for_node( */ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) { if (deve->se_lun_acl != NULL) { - printk(KERN_ERR "struct se_dev_entry->se_lun_acl" + pr_err("struct se_dev_entry->se_lun_acl" " already set for demo mode -> explict" " LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); return -EINVAL; } if (deve->se_lun != lun) { - printk(KERN_ERR "struct se_dev_entry->se_lun does" + pr_err("struct se_dev_entry->se_lun does" " match passed struct se_lun for demo mode" " -> explict LUN ACL transition\n"); spin_unlock_irq(&nacl->device_list_lock); @@ -501,8 +501,8 @@ static struct se_port *core_alloc_port(struct se_device *dev) struct se_port *port, *port_tmp; port = kzalloc(sizeof(struct se_port), GFP_KERNEL); - if (!(port)) { - printk(KERN_ERR "Unable to allocate struct se_port\n"); + if (!port) { + pr_err("Unable to allocate struct se_port\n"); return ERR_PTR(-ENOMEM); } INIT_LIST_HEAD(&port->sep_alua_list); @@ -513,7 +513,7 @@ static struct se_port *core_alloc_port(struct se_device *dev) spin_lock(&dev->se_port_lock); if (dev->dev_port_count == 0x0000ffff) { - printk(KERN_WARNING "Reached dev->dev_port_count ==" + pr_warn("Reached dev->dev_port_count ==" " 0x0000ffff\n"); spin_unlock(&dev->se_port_lock); return ERR_PTR(-ENOSPC); @@ -532,7 +532,7 @@ again: * 3h to FFFFh Relative port 3 through 65 535 */ port->sep_rtpi = dev->dev_rpti_counter++; - if (!(port->sep_rtpi)) + if (!port->sep_rtpi) goto again; list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) { @@ -570,7 +570,7 @@ static void core_export_port( if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) { tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port); if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) { - printk(KERN_ERR "Unable to allocate t10_alua_tg_pt" + pr_err("Unable to allocate t10_alua_tg_pt" "_gp_member_t\n"); return; } @@ -578,7 +578,7 @@ static void core_export_port( __core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, su_dev->t10_alua.default_tg_pt_gp); spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock); - printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port" + pr_debug("%s/%s: Adding to default ALUA Target Port" " Group: alua/default_tg_pt_gp\n", dev->transport->name, tpg->se_tpg_tfo->get_fabric_name()); } @@ -663,8 +663,8 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) list_for_each_entry(se_task, &se_cmd->t_task_list, t_list) break; - if (!(se_task)) { - printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n"); + if (!se_task) { + pr_err("Unable to locate struct se_task for struct se_cmd\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -675,7 +675,7 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd) * coming via a target_core_mod PASSTHROUGH op, and not through * a $FABRIC_MOD. In that case, report LUN=0 only. 
*/ - if (!(se_sess)) { + if (!se_sess) { int_to_scsilun(0, (struct scsi_lun *)&buf[offset]); lun_count = 1; goto done; @@ -893,12 +893,12 @@ void se_dev_set_default_attribs( int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) { if (task_timeout > DA_TASK_TIMEOUT_MAX) { - printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then" + pr_err("dev[%p]: Passed task_timeout: %u larger then" " DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); return -EINVAL; } else { dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; - printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n", + pr_debug("dev[%p]: Set SE Device task_timeout: %u\n", dev, task_timeout); } @@ -910,7 +910,7 @@ int se_dev_set_max_unmap_lba_count( u32 max_unmap_lba_count) { dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count; - printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n", + pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n", dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count); return 0; } @@ -921,7 +921,7 @@ int se_dev_set_max_unmap_block_desc_count( { dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = max_unmap_block_desc_count; - printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n", + pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n", dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count); return 0; } @@ -931,7 +931,7 @@ int se_dev_set_unmap_granularity( u32 unmap_granularity) { dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity; - printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n", + pr_debug("dev[%p]: Set unmap_granularity: %u\n", dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity); return 0; } @@ -941,7 +941,7 @@ int se_dev_set_unmap_granularity_alignment( u32 unmap_granularity_alignment) { dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment; - printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n", + pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n", dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment); return 0; } @@ -949,19 +949,19 @@ int se_dev_set_unmap_granularity_alignment( int se_dev_set_emulate_dpo(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } if (dev->transport->dpo_emulated == NULL) { - printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n"); + pr_err("dev->transport->dpo_emulated is NULL\n"); return -EINVAL; } if (dev->transport->dpo_emulated(dev) == 0) { - printk(KERN_ERR "dev->transport->dpo_emulated not supported\n"); + pr_err("dev->transport->dpo_emulated not supported\n"); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; - printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation" + pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation" " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); return 0; } @@ -969,19 +969,19 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag) int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } if (dev->transport->fua_write_emulated == NULL) { - printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n"); + pr_err("dev->transport->fua_write_emulated is NULL\n"); return -EINVAL; } if (dev->transport->fua_write_emulated(dev) == 0) { - printk(KERN_ERR 
"dev->transport->fua_write_emulated not supported\n"); + pr_err("dev->transport->fua_write_emulated not supported\n"); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; - printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", + pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write); return 0; } @@ -989,19 +989,19 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag) int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } if (dev->transport->fua_read_emulated == NULL) { - printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n"); + pr_err("dev->transport->fua_read_emulated is NULL\n"); return -EINVAL; } if (dev->transport->fua_read_emulated(dev) == 0) { - printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n"); + pr_err("dev->transport->fua_read_emulated not supported\n"); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; - printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n", + pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); return 0; } @@ -1009,19 +1009,19 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag) int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } if (dev->transport->write_cache_emulated == NULL) { - printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n"); + pr_err("dev->transport->write_cache_emulated is NULL\n"); return -EINVAL; } if (dev->transport->write_cache_emulated(dev) == 0) { - printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n"); + pr_err("dev->transport->write_cache_emulated not supported\n"); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; - printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", + pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache); return 0; } @@ -1029,19 +1029,19 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1) && (flag != 2)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device" + pr_err("dev[%p]: Unable to change SE Device" " UA_INTRLCK_CTRL while dev_export_obj: %d count" " exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag; - printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", + pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl); return 0; @@ -1050,18 +1050,18 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag) int se_dev_set_emulate_tas(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - 
printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while" + pr_err("dev[%p]: Unable to change SE Device TAS while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.emulate_tas = flag; - printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", + pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n", dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled"); return 0; @@ -1070,20 +1070,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag) int se_dev_set_emulate_tpu(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { - printk(KERN_ERR "Generic Block Discard not supported\n"); + if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { + pr_err("Generic Block Discard not supported\n"); return -ENOSYS; } dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag; - printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", + pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", dev, flag); return 0; } @@ -1091,20 +1091,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag) int se_dev_set_emulate_tpws(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } /* * We expect this value to be non-zero when generic Block Layer * Discard supported is detected iblock_create_virtdevice(). */ - if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) { - printk(KERN_ERR "Generic Block Discard not supported\n"); + if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { + pr_err("Generic Block Discard not supported\n"); return -ENOSYS; } dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag; - printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", + pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", dev, flag); return 0; } @@ -1112,11 +1112,11 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag) int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag) { if ((flag != 0) && (flag != 1)) { - printk(KERN_ERR "Illegal value %d\n", flag); + pr_err("Illegal value %d\n", flag); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag; - printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, + pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev, (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? 
"Enabled" : "Disabled"); return 0; } @@ -1141,20 +1141,20 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) u32 orig_queue_depth = dev->queue_depth; if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while" + pr_err("dev[%p]: Unable to change SE Device TCQ while" " dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } - if (!(queue_depth)) { - printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue" + if (!queue_depth) { + pr_err("dev[%p]: Illegal ZERO value for queue" "_depth\n", dev); return -EINVAL; } if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { - printk(KERN_ERR "dev[%p]: Passed queue_depth: %u" + pr_err("dev[%p]: Passed queue_depth: %u" " exceeds TCM/SE_Device TCQ: %u\n", dev, queue_depth, dev->se_sub_dev->se_dev_attrib.hw_queue_depth); @@ -1163,7 +1163,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) } else { if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) { if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) { - printk(KERN_ERR "dev[%p]: Passed queue_depth:" + pr_err("dev[%p]: Passed queue_depth:" " %u exceeds TCM/SE_Device MAX" " TCQ: %u\n", dev, queue_depth, dev->se_sub_dev->se_dev_attrib.hw_queue_depth); @@ -1178,7 +1178,7 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) else if (queue_depth < orig_queue_depth) atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left); - printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n", + pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, queue_depth); return 0; } @@ -1188,41 +1188,41 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) int force = 0; /* Force setting for VDEVS */ if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device" + pr_err("dev[%p]: Unable to change SE Device" " max_sectors while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } - if (!(max_sectors)) { - printk(KERN_ERR "dev[%p]: Illegal ZERO value for" + if (!max_sectors) { + pr_err("dev[%p]: Illegal ZERO value for" " max_sectors\n", dev); return -EINVAL; } if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) { - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than" + pr_err("dev[%p]: Passed max_sectors: %u less than" " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MIN); return -EINVAL; } if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) { - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + pr_err("dev[%p]: Passed max_sectors: %u" " greater than TCM/SE_Device max_sectors:" " %u\n", dev, max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); return -EINVAL; } } else { - if (!(force) && (max_sectors > + if (!force && (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) { - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + pr_err("dev[%p]: Passed max_sectors: %u" " greater than TCM/SE_Device max_sectors" ": %u, use force=1 to override.\n", dev, max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors); return -EINVAL; } if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) { - printk(KERN_ERR "dev[%p]: Passed max_sectors: %u" + pr_err("dev[%p]: Passed max_sectors: %u" " greater than 
DA_STATUS_MAX_SECTORS_MAX:" " %u\n", dev, max_sectors, DA_STATUS_MAX_SECTORS_MAX); @@ -1231,7 +1231,7 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) } dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors; - printk("dev[%p]: SE Device max_sectors changed to %u\n", + pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", dev, max_sectors); return 0; } @@ -1239,25 +1239,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors) int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device" + pr_err("dev[%p]: Unable to change SE Device" " optimal_sectors while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; } if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be" + pr_err("dev[%p]: Passed optimal_sectors cannot be" " changed for TCM/pSCSI\n", dev); return -EINVAL; } if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) { - printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be" + pr_err("dev[%p]: Passed optimal_sectors %u cannot be" " greater than max_sectors: %u\n", dev, optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors; - printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n", + pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n", dev, optimal_sectors); return 0; } @@ -1265,7 +1265,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) int se_dev_set_block_size(struct se_device *dev, u32 block_size) { if (atomic_read(&dev->dev_export_obj.obj_access_count)) { - printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size" + pr_err("dev[%p]: Unable to change SE Device block_size" " while dev_export_obj: %d count exists\n", dev, atomic_read(&dev->dev_export_obj.obj_access_count)); return -EINVAL; @@ -1275,21 +1275,21 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size) (block_size != 1024) && (block_size != 2048) && (block_size != 4096)) { - printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u" + pr_err("dev[%p]: Illegal value for block_device: %u" " for SE device, must be 512, 1024, 2048 or 4096\n", dev, block_size); return -EINVAL; } if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) { - printk(KERN_ERR "dev[%p]: Not allowed to change block_size for" + pr_err("dev[%p]: Not allowed to change block_size for" " Physical Device, use for Linux/SCSI to change" " block_size for underlying hardware\n", dev); return -EINVAL; } dev->se_sub_dev->se_dev_attrib.block_size = block_size; - printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n", + pr_debug("dev[%p]: SE Device block_size changed to %u\n", dev, block_size); return 0; } @@ -1304,13 +1304,13 @@ struct se_lun *core_dev_add_lun( u32 lun_access = 0; if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) { - printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n", + pr_err("Unable to export struct se_device while dev_access_obj: %d\n", atomic_read(&dev->dev_access_obj.obj_access_count)); return NULL; } lun_p = core_tpg_pre_addlun(tpg, lun); - if ((IS_ERR(lun_p)) || !(lun_p)) + if ((IS_ERR(lun_p)) || !lun_p) return NULL; if (dev->dev_flags & DF_READ_ONLY) @@ -1321,7 +1321,7 @@ struct se_lun 
*core_dev_add_lun( if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0) return NULL; - printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" + pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from" " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id); @@ -1357,12 +1357,12 @@ int core_dev_del_lun( int ret = 0; lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret); - if (!(lun)) + if (!lun) return ret; core_tpg_post_dellun(tpg, lun); - printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" + pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from" " device object\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, tpg->se_tpg_tfo->get_fabric_name()); @@ -1376,7 +1376,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l spin_lock(&tpg->tpg_lun_lock); if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS" + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS" "_PER_TPG-1: %u for Target Portal Group: %hu\n", tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, @@ -1387,7 +1387,7 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l lun = &tpg->tpg_lun_list[unpacked_lun]; if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) { - printk(KERN_ERR "%s Logical Unit Number: %u is not free on" + pr_err("%s Logical Unit Number: %u is not free on" " Target Portal Group: %hu, ignoring request.\n", tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, tpg->se_tpg_tfo->tpg_get_tag(tpg)); @@ -1409,7 +1409,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked spin_lock(&tpg->tpg_lun_lock); if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER" "_TPG-1: %u for Target Portal Group: %hu\n", tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, @@ -1420,7 +1420,7 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked lun = &tpg->tpg_lun_list[unpacked_lun]; if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + pr_err("%s Logical Unit Number: %u is not active on" " Target Portal Group: %hu, ignoring request.\n", tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, tpg->se_tpg_tfo->tpg_get_tag(tpg)); @@ -1442,19 +1442,19 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl( struct se_node_acl *nacl; if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) { - printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n", + pr_err("%s InitiatorName exceeds maximum size.\n", tpg->se_tpg_tfo->get_fabric_name()); *ret = -EOVERFLOW; return NULL; } nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname); - if (!(nacl)) { + if (!nacl) { *ret = -EINVAL; return NULL; } lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL); - if (!(lacl)) { - printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n"); + if (!lacl) { + pr_err("Unable to allocate memory for struct se_lun_acl.\n"); *ret = -ENOMEM; return NULL; } @@ -1477,8 +1477,8 @@ int core_dev_add_initiator_node_lun_acl( struct se_node_acl *nacl; lun = core_dev_get_lun(tpg, unpacked_lun); - if (!(lun)) { - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + if (!lun) { + 
pr_err("%s Logical Unit Number: %u is not active on" " Target Portal Group: %hu, ignoring request.\n", tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, tpg->se_tpg_tfo->tpg_get_tag(tpg)); @@ -1486,7 +1486,7 @@ int core_dev_add_initiator_node_lun_acl( } nacl = lacl->se_lun_nacl; - if (!(nacl)) + if (!nacl) return -EINVAL; if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) && @@ -1505,7 +1505,7 @@ int core_dev_add_initiator_node_lun_acl( smp_mb__after_atomic_inc(); spin_unlock(&lun->lun_acl_lock); - printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " + pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun, (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO", @@ -1530,7 +1530,7 @@ int core_dev_del_initiator_node_lun_acl( struct se_node_acl *nacl; nacl = lacl->se_lun_nacl; - if (!(nacl)) + if (!nacl) return -EINVAL; spin_lock(&lun->lun_acl_lock); @@ -1544,7 +1544,7 @@ int core_dev_del_initiator_node_lun_acl( lacl->se_lun = NULL; - printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for" + pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for" " InitiatorNode: %s Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, @@ -1557,7 +1557,7 @@ void core_dev_free_initiator_node_lun_acl( struct se_portal_group *tpg, struct se_lun_acl *lacl) { - printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" + pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s" " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), tpg->se_tpg_tfo->get_fabric_name(), @@ -1575,7 +1575,7 @@ int core_dev_setup_virtual_lun0(void) char buf[16]; int ret; - hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE); + hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE); if (IS_ERR(hba)) return PTR_ERR(hba); @@ -1583,8 +1583,8 @@ int core_dev_setup_virtual_lun0(void) t = hba->transport; se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL); - if (!(se_dev)) { - printk(KERN_ERR "Unable to allocate memory for" + if (!se_dev) { + pr_err("Unable to allocate memory for" " struct se_subsystem_dev\n"); ret = -ENOMEM; goto out; @@ -1606,8 +1606,8 @@ int core_dev_setup_virtual_lun0(void) se_dev->se_dev_hba = hba; se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0"); - if (!(se_dev->se_dev_su_ptr)) { - printk(KERN_ERR "Unable to locate subsystem dependent pointer" + if (!se_dev->se_dev_su_ptr) { + pr_err("Unable to locate subsystem dependent pointer" " from allocate_virtdevice()\n"); ret = -ENOMEM; goto out; @@ -1643,7 +1643,7 @@ void core_dev_release_virtual_lun0(void) struct se_hba *hba = lun0_hba; struct se_subsystem_dev *su_dev = lun0_su_dev; - if (!(hba)) + if (!hba) return; if (g_lun0_dev) diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 0b1659d0fefc..f1654694f4ea 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) cit->ct_group_ops = _group_ops; \ cit->ct_attrs = _attrs; \ cit->ct_owner = tf->tf_module; \ - printk("Setup generic %s\n", __stringify(_name)); \ + pr_debug("Setup generic %s\n", __stringify(_name)); \ } /* Start of tfc_tpg_mappedlun_cit */ @@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link( /* * Ensure that the source port exists */ - if 
(!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) { - printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep" + if (!lun->lun_sep || !lun->lun_sep->sep_tpg) { + pr_err("Source se_lun->lun_sep or lun->lun_sep->sep" "_tpg does not exist\n"); return -EINVAL; } @@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link( * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT */ if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { - printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n", + pr_err("Illegal Initiator ACL SymLink outside of %s\n", config_item_name(wwn_ci)); return -EINVAL; } if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { - printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s" + pr_err("Illegal Initiator ACL Symlink outside of %s" " TPGT: %s\n", config_item_name(wwn_ci), config_item_name(tpg_ci)); return -EINVAL; @@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink( /* * Determine if the underlying MappedLUN has already been released.. */ - if (!(deve->se_lun)) + if (!deve->se_lun) return 0; lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); @@ -202,7 +202,7 @@ static ssize_t target_fabric_mappedlun_store_write_protect( TRANSPORT_LUNFLAGS_READ_WRITE, lacl->se_lun_nacl); - printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s" + pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" " Mapped LUN: %u Write Protect bit to %s\n", se_tpg->se_tpg_tfo->get_fabric_name(), lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); @@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun( int ret = 0; acl_ci = &group->cg_item; - if (!(acl_ci)) { - printk(KERN_ERR "Unable to locatel acl_ci\n"); + if (!acl_ci) { + pr_err("Unable to locatel acl_ci\n"); return NULL; } buf = kzalloc(strlen(name) + 1, GFP_KERNEL); - if (!(buf)) { - printk(KERN_ERR "Unable to allocate memory for name buf\n"); + if (!buf) { + pr_err("Unable to allocate memory for name buf\n"); return ERR_PTR(-ENOMEM); } snprintf(buf, strlen(name) + 1, "%s", name); @@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun( * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. 
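The configfs name check that follows -- strstr(buf, "lun_") != buf -- is simply a prefix test on the directory name before the LUN number is parsed out. A stand-alone user-space sketch of the same parse, not from this patch and with invented names, using strncmp for the prefix and strtoul for the number:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_lun_name(const char *name, unsigned long *lun_out)
{
        char *end;

        if (strncmp(name, "lun_", 4) != 0)      /* same as strstr(buf,..)==buf */
                return -1;

        *lun_out = strtoul(name + 4, &end, 0);
        if (end == name + 4 || *end != '\0')    /* no digits, or trailing junk */
                return -1;
        return 0;
}

int main(void)
{
        unsigned long lun;

        if (parse_lun_name("lun_7", &lun) == 0)
                printf("unpacked_lun=%lu\n", lun);      /* prints 7 */
        return 0;
}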
*/ if (strstr(buf, "lun_") != buf) { - printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s" + pr_err("Unable to locate \"lun_\" from buf: %s" " name: %s\n", buf, name); ret = -EINVAL; goto out; @@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun( lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun, config_item_name(acl_ci), &ret); - if (!(lacl)) { + if (!lacl) { ret = -EINVAL; goto out; } @@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun( lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!lacl_cg->default_groups) { - printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n"); + pr_err("Unable to allocate lacl_cg->default_groups\n"); ret = -ENOMEM; goto out; } @@ -383,7 +383,7 @@ static struct config_group *target_fabric_make_mappedlun( ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, GFP_KERNEL); if (!ml_stat_grp->default_groups) { - printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n"); + pr_err("Unable to allocate ml_stat_grp->default_groups\n"); ret = -ENOMEM; goto out; } @@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl( struct se_node_acl *se_nacl; struct config_group *nacl_cg; - if (!(tf->tf_ops.fabric_make_nodeacl)) { - printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n"); + if (!tf->tf_ops.fabric_make_nodeacl) { + pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n"); return ERR_PTR(-ENOSYS); } @@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np( struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; struct se_tpg_np *se_tpg_np; - if (!(tf->tf_ops.fabric_make_np)) { - printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n"); + if (!tf->tf_ops.fabric_make_np) { + pr_err("tf->tf_ops.fabric_make_np is NULL\n"); return ERR_PTR(-ENOSYS); } se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); - if (!(se_tpg_np) || IS_ERR(se_tpg_np)) + if (!se_tpg_np || IS_ERR(se_tpg_np)) return ERR_PTR(-EINVAL); se_tpg_np->tpg_np_parent = se_tpg; @@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp( struct se_lun *lun, char *page) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); @@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( const char *page, size_t count) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); @@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline( struct se_lun *lun, char *page) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_offline_bit(lun, page); @@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline( const char *page, size_t count) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_offline_bit(lun, page, count); @@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status( struct se_lun *lun, char *page) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_secondary_status(lun, page); @@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status( const char *page, size_t count) 
{ - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_secondary_status(lun, page, count); @@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md( struct se_lun *lun, char *page) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_secondary_write_metadata(lun, page); @@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md( const char *page, size_t count) { - if (!(lun)) - return -ENODEV; - - if (!(lun->lun_sep)) + if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_secondary_write_metadata(lun, page, count); @@ -781,13 +757,13 @@ static int target_fabric_port_link( tf = se_tpg->se_tpg_wwn->wwn_tf; if (lun->lun_se_dev != NULL) { - printk(KERN_ERR "Port Symlink already exists\n"); + pr_err("Port Symlink already exists\n"); return -EEXIST; } dev = se_dev->se_dev_ptr; - if (!(dev)) { - printk(KERN_ERR "Unable to locate struct se_device pointer from" + if (!dev) { + pr_err("Unable to locate struct se_device pointer from" " %s\n", config_item_name(se_dev_ci)); ret = -ENODEV; goto out; @@ -795,8 +771,8 @@ static int target_fabric_port_link( lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev, lun->unpacked_lun); - if ((IS_ERR(lun_p)) || !(lun_p)) { - printk(KERN_ERR "core_dev_add_lun() failed\n"); + if (IS_ERR(lun_p) || !lun_p) { + pr_err("core_dev_add_lun() failed\n"); ret = -EINVAL; goto out; } @@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun( int errno; if (strstr(name, "lun_") != name) { - printk(KERN_ERR "Unable to locate \'_\" in" + pr_err("Unable to locate \'_\" in" " \"lun_$LUN_NUMBER\"\n"); return ERR_PTR(-EINVAL); } @@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun( return ERR_PTR(-EINVAL); lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); - if (!(lun)) + if (!lun) return ERR_PTR(-EINVAL); lun_cg = &lun->lun_group; lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, GFP_KERNEL); if (!lun_cg->default_groups) { - printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n"); + pr_err("Unable to allocate lun_cg->default_groups\n"); return ERR_PTR(-ENOMEM); } @@ -918,7 +894,7 @@ static struct config_group *target_fabric_make_lun( port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3, GFP_KERNEL); if (!port_stat_grp->default_groups) { - printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n"); + pr_err("Unable to allocate port_stat_grp->default_groups\n"); errno = -ENOMEM; goto out; } @@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg( struct target_fabric_configfs *tf = wwn->wwn_tf; struct se_portal_group *se_tpg; - if (!(tf->tf_ops.fabric_make_tpg)) { - printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n"); + if (!tf->tf_ops.fabric_make_tpg) { + pr_err("tf->tf_ops.fabric_make_tpg is NULL\n"); return ERR_PTR(-ENOSYS); } se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); - if (!(se_tpg) || IS_ERR(se_tpg)) + if (!se_tpg || IS_ERR(se_tpg)) return ERR_PTR(-EINVAL); /* * Setup default groups from pre-allocated se_tpg->tpg_default_groups @@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn( struct target_fabric_configfs, tf_group); struct se_wwn *wwn; - if (!(tf->tf_ops.fabric_make_wwn)) { - printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n"); + if (!tf->tf_ops.fabric_make_wwn) { + pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); return 
ERR_PTR(-ENOSYS); } wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); - if (!(wwn) || IS_ERR(wwn)) + if (!wwn || IS_ERR(wwn)) return ERR_PTR(-EINVAL); wwn->wwn_tf = tf; diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index 1e193f324895..a29968806cfc 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -172,7 +172,7 @@ u32 fc_get_pr_transport_id( ptr = &se_nacl->initiatorname[0]; for (i = 0; i < 24; ) { - if (!(strncmp(&ptr[i], ":", 1))) { + if (!strncmp(&ptr[i], ":", 1)) { i++; continue; } @@ -386,7 +386,7 @@ char *iscsi_parse_pr_out_transport_id( * Reserved */ if ((format_code != 0x00) && (format_code != 0x40)) { - printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI" + pr_err("Illegal format code: 0x%02x for iSCSI" " Initiator Transport ID\n", format_code); return NULL; } @@ -406,7 +406,7 @@ char *iscsi_parse_pr_out_transport_id( tid_len += padding; if ((add_len + 4) != tid_len) { - printk(KERN_INFO "LIO-Target Extracted add_len: %hu " + pr_debug("LIO-Target Extracted add_len: %hu " "does not match calculated tid_len: %u," " using tid_len instead\n", add_len+4, tid_len); *out_tid_len = tid_len; @@ -420,8 +420,8 @@ char *iscsi_parse_pr_out_transport_id( */ if (format_code == 0x40) { p = strstr((char *)&buf[4], ",i,0x"); - if (!(p)) { - printk(KERN_ERR "Unable to locate \",i,0x\" seperator" + if (!p) { + pr_err("Unable to locate \",i,0x\" seperator" " for Initiator port identifier: %s\n", (char *)&buf[4]); return NULL; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 5c47f4202386..bc1b33639b8d 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -42,18 +42,6 @@ #include "target_core_file.h" -#if 1 -#define DEBUG_FD_CACHE(x...) printk(x) -#else -#define DEBUG_FD_CACHE(x...) -#endif - -#if 1 -#define DEBUG_FD_FUA(x...) printk(x) -#else -#define DEBUG_FD_FUA(x...) 
-#endif - static struct se_subsystem_api fileio_template; /* fd_attach_hba(): (Part of se_subsystem_api_t template) @@ -65,8 +53,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) struct fd_host *fd_host; fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL); - if (!(fd_host)) { - printk(KERN_ERR "Unable to allocate memory for struct fd_host\n"); + if (!fd_host) { + pr_err("Unable to allocate memory for struct fd_host\n"); return -ENOMEM; } @@ -74,10 +62,10 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id) hba->hba_ptr = fd_host; - printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" + pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic" " Target Core Stack %s\n", hba->hba_id, FD_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" + pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic" " MaxSectors: %u\n", hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS); @@ -88,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba) { struct fd_host *fd_host = hba->hba_ptr; - printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" + pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic" " Target Core\n", hba->hba_id, fd_host->fd_host_id); kfree(fd_host); @@ -101,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name) struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr; fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL); - if (!(fd_dev)) { - printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n"); + if (!fd_dev) { + pr_err("Unable to allocate memory for struct fd_dev\n"); return NULL; } fd_dev->fd_host = fd_host; - printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name); + pr_debug("FILEIO: Allocated fd_dev for %p\n", name); return fd_dev; } @@ -141,7 +129,7 @@ static struct se_device *fd_create_virtdevice( set_fs(old_fs); if (IS_ERR(dev_p)) { - printk(KERN_ERR "getname(%s) failed: %lu\n", + pr_err("getname(%s) failed: %lu\n", fd_dev->fd_dev_name, IS_ERR(dev_p)); ret = PTR_ERR(dev_p); goto fail; @@ -164,12 +152,12 @@ static struct se_device *fd_create_virtdevice( file = filp_open(dev_p, flags, 0600); if (IS_ERR(file)) { - printk(KERN_ERR "filp_open(%s) failed\n", dev_p); + pr_err("filp_open(%s) failed\n", dev_p); ret = PTR_ERR(file); goto fail; } if (!file || !file->f_dentry) { - printk(KERN_ERR "filp_open(%s) failed\n", dev_p); + pr_err("filp_open(%s) failed\n", dev_p); goto fail; } fd_dev->fd_file = file; @@ -199,14 +187,14 @@ static struct se_device *fd_create_virtdevice( fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) - fd_dev->fd_block_size); - printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct" + pr_debug("FILEIO: Using size: %llu bytes from struct" " block_device blocks: %llu logical_block_size: %d\n", fd_dev->fd_dev_size, div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size), fd_dev->fd_block_size); } else { if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) { - printk(KERN_ERR "FILEIO: Missing fd_dev_size=" + pr_err("FILEIO: Missing fd_dev_size=" " parameter, and no backing struct" " block_device\n"); goto fail; @@ -225,13 +213,13 @@ static struct se_device *fd_create_virtdevice( dev = transport_add_device_to_core_hba(hba, &fileio_template, se_dev, dev_flags, fd_dev, &dev_limits, "FILEIO", FD_VERSION); - if (!(dev)) + if (!dev) goto fail; fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++; fd_dev->fd_queue_depth = dev->queue_depth; - printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," 
+ pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s," " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, fd_dev->fd_dev_name, fd_dev->fd_dev_size); @@ -269,25 +257,24 @@ static inline struct fd_request *FILE_REQ(struct se_task *task) static struct se_task * -fd_alloc_task(struct se_cmd *cmd) +fd_alloc_task(unsigned char *cdb) { struct fd_request *fd_req; fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL); - if (!(fd_req)) { - printk(KERN_ERR "Unable to allocate struct fd_request\n"); + if (!fd_req) { + pr_err("Unable to allocate struct fd_request\n"); return NULL; } - fd_req->fd_dev = cmd->se_dev->dev_ptr; - return &fd_req->fd_task; } static int fd_do_readv(struct se_task *task) { struct fd_request *req = FILE_REQ(task); - struct file *fd = req->fd_dev->fd_file; + struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; + struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; @@ -295,20 +282,20 @@ static int fd_do_readv(struct se_task *task) task->se_dev->se_sub_dev->se_dev_attrib.block_size); int ret = 0, i; - iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); - if (!(iov)) { - printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n"); + iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); + if (!iov) { + pr_err("Unable to allocate fd_do_readv iov[]\n"); return -ENOMEM; } - for (i = 0; i < task->task_sg_num; i++) { + for (i = 0; i < task->task_sg_nents; i++) { iov[i].iov_len = sg[i].length; iov[i].iov_base = sg_virt(&sg[i]); } old_fs = get_fs(); set_fs(get_ds()); - ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos); + ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos); set_fs(old_fs); kfree(iov); @@ -319,14 +306,14 @@ static int fd_do_readv(struct se_task *task) */ if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) { if (ret < 0 || ret != task->task_size) { - printk(KERN_ERR "vfs_readv() returned %d," + pr_err("vfs_readv() returned %d," " expecting %d for S_ISBLK\n", ret, (int)task->task_size); return (ret < 0 ? ret : -EINVAL); } } else { if (ret < 0) { - printk(KERN_ERR "vfs_readv() returned %d for non" + pr_err("vfs_readv() returned %d for non" " S_ISBLK\n", ret); return ret; } @@ -338,7 +325,8 @@ static int fd_do_readv(struct se_task *task) static int fd_do_writev(struct se_task *task) { struct fd_request *req = FILE_REQ(task); - struct file *fd = req->fd_dev->fd_file; + struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; + struct file *fd = dev->fd_file; struct scatterlist *sg = task->task_sg; struct iovec *iov; mm_segment_t old_fs; @@ -346,26 +334,26 @@ static int fd_do_writev(struct se_task *task) task->se_dev->se_sub_dev->se_dev_attrib.block_size); int ret, i = 0; - iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL); - if (!(iov)) { - printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n"); + iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); + if (!iov) { + pr_err("Unable to allocate fd_do_writev iov[]\n"); return -ENOMEM; } - for (i = 0; i < task->task_sg_num; i++) { + for (i = 0; i < task->task_sg_nents; i++) { iov[i].iov_len = sg[i].length; iov[i].iov_base = sg_virt(&sg[i]); } old_fs = get_fs(); set_fs(get_ds()); - ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos); + ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos); set_fs(old_fs); kfree(iov); if (ret < 0 || ret != task->task_size) { - printk(KERN_ERR "vfs_writev() returned %d\n", ret); + pr_err("vfs_writev() returned %d\n", ret); return (ret < 0 ? 
ret : -EINVAL); } @@ -404,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task) ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0) - printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); + pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); if (!immed) transport_complete_sync_cache(cmd, ret == 0); @@ -449,12 +437,12 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task) loff_t end = start + task->task_size; int ret; - DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", + pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n", task->task_lba, task->task_size); ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0) - printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret); + pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret); } static int fd_do_task(struct se_task *task) @@ -548,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params( snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, "%s", arg_p); kfree(arg_p); - printk(KERN_INFO "FILEIO: Referencing Path: %s\n", + pr_debug("FILEIO: Referencing Path: %s\n", fd_dev->fd_dev_name); fd_dev->fbd_flags |= FBDF_HAS_PATH; break; @@ -561,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params( ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size); kfree(arg_p); if (ret < 0) { - printk(KERN_ERR "strict_strtoull() failed for" + pr_err("strict_strtoull() failed for" " fd_dev_size=\n"); goto out; } - printk(KERN_INFO "FILEIO: Referencing Size: %llu" + pr_debug("FILEIO: Referencing Size: %llu" " bytes\n", fd_dev->fd_dev_size); fd_dev->fbd_flags |= FBDF_HAS_SIZE; break; case Opt_fd_buffered_io: match_int(args, &arg); if (arg != 1) { - printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg); + pr_err("bogus fd_buffered_io=%d value\n", arg); ret = -EINVAL; goto out; } - printk(KERN_INFO "FILEIO: Using buffered I/O" + pr_debug("FILEIO: Using buffered I/O" " operations for struct fd_dev\n"); fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO; @@ -597,7 +585,7 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr; if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) { - printk(KERN_ERR "Missing fd_dev_name=\n"); + pr_err("Missing fd_dev_name=\n"); return -EINVAL; } diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index 6386d3f60631..daebd710b893 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -16,8 +16,6 @@ struct fd_request { struct se_task fd_task; /* SCSI CDB from iSCSI Command PDU */ unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; - /* FILEIO device */ - struct fd_dev *fd_dev; } ____cacheline_aligned; #define FBDF_HAS_PATH 0x01 diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index bd9da25bc945..0639b975d6f5 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -58,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api) mutex_lock(&subsystem_mutex); list_for_each_entry(s, &subsystem_list, sub_api_list) { - if (!(strcmp(s->name, sub_api->name))) { - printk(KERN_ERR "%p is already registered with" + if (!strcmp(s->name, sub_api->name)) { + pr_err("%p is already registered with" " duplicate name %s, unable to process" " request\n", s, s->name); mutex_unlock(&subsystem_mutex); @@ -69,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api) list_add_tail(&sub_api->sub_api_list, &subsystem_list); 
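transport_subsystem_register() above is a textbook kernel registry: walk a global list under a mutex looking for a duplicate name, then append. A condensed sketch of the pattern -- not from this patch, identifiers invented:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_api {
        const char *name;
        struct list_head node;
};

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_mutex);

static int demo_register(struct demo_api *api)
{
        struct demo_api *s;

        mutex_lock(&demo_mutex);
        list_for_each_entry(s, &demo_list, node) {
                if (!strcmp(s->name, api->name)) {      /* duplicate name */
                        mutex_unlock(&demo_mutex);
                        return -EEXIST;
                }
        }
        list_add_tail(&api->node, &demo_list);
        mutex_unlock(&demo_mutex);
        return 0;
}

Returning a real errno value here rather than -1 matches the "use errno values instead of returning -1 for everything" item in this changelog.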
mutex_unlock(&subsystem_mutex); - printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:" + pr_debug("TCM: Registered subsystem plugin: %s struct module:" " %p\n", sub_api->name, sub_api->owner); return 0; } @@ -109,7 +109,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) hba = kzalloc(sizeof(*hba), GFP_KERNEL); if (!hba) { - printk(KERN_ERR "Unable to allocate struct se_hba\n"); + pr_err("Unable to allocate struct se_hba\n"); return ERR_PTR(-ENOMEM); } @@ -135,7 +135,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags) list_add_tail(&hba->hba_node, &hba_list); spin_unlock(&hba_lock); - printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target" + pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target" " Core\n", hba->hba_id); return hba; @@ -161,7 +161,7 @@ core_delete_hba(struct se_hba *hba) list_del(&hba->hba_node); spin_unlock(&hba_lock); - printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target" + pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target" " Core\n", hba->hba_id); if (hba->transport->owner) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 164b72106b88..251fc66a8212 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -47,12 +47,6 @@ #include "target_core_iblock.h" -#if 0 -#define DEBUG_IBLOCK(x...) printk(x) -#else -#define DEBUG_IBLOCK(x...) -#endif - static struct se_subsystem_api iblock_template; static void iblock_bio_done(struct bio *, int); @@ -66,8 +60,8 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id) struct iblock_hba *ib_host; ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); - if (!(ib_host)) { - printk(KERN_ERR "Unable to allocate memory for" + if (!ib_host) { + pr_err("Unable to allocate memory for" " struct iblock_hba\n"); return -ENOMEM; } @@ -76,11 +70,11 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id) hba->hba_ptr = ib_host; - printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" + pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", + pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", hba->hba_id, ib_host->iblock_host_id); return 0; @@ -90,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba) { struct iblock_hba *ib_host = hba->hba_ptr; - printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" + pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" " Target Core\n", hba->hba_id, ib_host->iblock_host_id); kfree(ib_host); @@ -103,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) struct iblock_hba *ib_host = hba->hba_ptr; ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); - if (!(ib_dev)) { - printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); + if (!ib_dev) { + pr_err("Unable to allocate struct iblock_dev\n"); return NULL; } ib_dev->ibd_host = ib_host; - printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); + pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name); return ib_dev; } @@ -128,8 +122,8 @@ static struct se_device *iblock_create_virtdevice( u32 dev_flags = 0; int ret = -EINVAL; - if (!(ib_dev)) { - printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); + if (!ib_dev) { + pr_err("Unable to locate struct iblock_dev parameter\n"); return ERR_PTR(ret); } 
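iblock_create_virtdevice() reports failure through ERR_PTR(), the same convention core_alloc_port() used earlier in this patch. A short sketch of the round trip -- not from this patch, names invented:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_port { int id; };

static struct demo_port *demo_alloc_port(int next_id)
{
        struct demo_port *p;

        if (next_id > 0xffff)           /* counter exhausted */
                return ERR_PTR(-ENOSPC);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)                         /* note the house style: !p, not !(p) */
                return ERR_PTR(-ENOMEM);

        p->id = next_id;
        return p;
}

static int demo_caller(void)
{
        struct demo_port *p = demo_alloc_port(3);

        if (IS_ERR(p))
                return PTR_ERR(p);      /* recovers -ENOSPC or -ENOMEM */
        kfree(p);
        return 0;
}

Because the error is encoded in the pointer itself, callers need only IS_ERR(); the belt-and-braces "IS_ERR(p) || !p" form survives in these files only where an older API may still return bare NULL.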
memset(&dev_limits, 0, sizeof(struct se_dev_limits)); @@ -137,16 +131,16 @@ static struct se_device *iblock_create_virtdevice( * These settings need to be made tunable.. */ ib_dev->ibd_bio_set = bioset_create(32, 64); - if (!(ib_dev->ibd_bio_set)) { - printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); + if (!ib_dev->ibd_bio_set) { + pr_err("IBLOCK: Unable to create bioset()\n"); return ERR_PTR(-ENOMEM); } - printk(KERN_INFO "IBLOCK: Created bio_set()\n"); + pr_debug("IBLOCK: Created bio_set()\n"); /* * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. */ - printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", + pr_debug( "IBLOCK: Claiming struct block_device: %s\n", ib_dev->ibd_udev_path); bd = blkdev_get_by_path(ib_dev->ibd_udev_path, @@ -172,7 +166,7 @@ static struct se_device *iblock_create_virtdevice( dev = transport_add_device_to_core_hba(hba, &iblock_template, se_dev, dev_flags, ib_dev, &dev_limits, "IBLOCK", IBLOCK_VERSION); - if (!(dev)) + if (!dev) goto failed; /* @@ -192,7 +186,7 @@ static struct se_device *iblock_create_virtdevice( dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = q->limits.discard_alignment; - printk(KERN_INFO "IBLOCK: BLOCK Discard support available," + pr_debug("IBLOCK: BLOCK Discard support available," " disabled by default\n"); } @@ -227,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) } static struct se_task * -iblock_alloc_task(struct se_cmd *cmd) +iblock_alloc_task(unsigned char *cdb) { struct iblock_req *ib_req; ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); - if (!(ib_req)) { - printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n"); + if (!ib_req) { + pr_err("Unable to allocate memory for struct iblock_req\n"); return NULL; } - ib_req->ib_dev = cmd->se_dev->dev_ptr; atomic_set(&ib_req->ib_bio_cnt, 0); return &ib_req->ib_task; } @@ -345,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task) */ ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); if (ret != 0) { - printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d " + pr_err("IBLOCK: block_issue_flush() failed: %d " " error_sector: %llu\n", ret, (unsigned long long)error_sector); } @@ -409,8 +402,9 @@ static int iblock_do_task(struct se_task *task) while (bio) { nbio = bio->bi_next; bio->bi_next = NULL; - DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" - " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); + pr_debug("Calling submit_bio() task: %p bio: %p" + " bio->bi_sector: %llu\n", task, bio, + (unsigned long long)bio->bi_sector); submit_bio(rw, bio); bio = nbio; @@ -480,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, switch (token) { case Opt_udev_path: if (ib_dev->ibd_bd) { - printk(KERN_ERR "Unable to set udev_path= while" + pr_err("Unable to set udev_path= while" " ib_dev->ibd_bd exists\n"); ret = -EEXIST; goto out; @@ -493,7 +487,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, "%s", arg_p); kfree(arg_p); - printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", + pr_debug("IBLOCK: Referencing UDEV path: %s\n", ib_dev->ibd_udev_path); ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; break; @@ -516,7 +510,7 @@ static ssize_t iblock_check_configfs_dev_params( struct iblock_dev *ibd = se_dev->se_dev_su_ptr; if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { - printk(KERN_ERR "Missing udev_path= 
parameters for IBLOCK\n"); + pr_err("Missing udev_path= parameters for IBLOCK\n"); return -EINVAL; } @@ -574,15 +568,15 @@ static struct bio *iblock_get_bio( struct bio *bio; bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); - if (!(bio)) { - printk(KERN_ERR "Unable to allocate memory for bio\n"); + if (!bio) { + pr_err("Unable to allocate memory for bio\n"); *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; return NULL; } - DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" - " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); - DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); + pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:" + " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set); + pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size); bio->bi_bdev = ib_dev->ibd_bd; bio->bi_private = task; @@ -591,8 +585,8 @@ static struct bio *iblock_get_bio( bio->bi_sector = lba; atomic_inc(&ib_req->ib_bio_cnt); - DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); - DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", + pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector); + pr_debug("Set ib_req->ib_bio_cnt: %d\n", atomic_read(&ib_req->ib_bio_cnt)); return bio; } @@ -606,7 +600,7 @@ static int iblock_map_task_SG(struct se_task *task) struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; struct scatterlist *sg; int ret = 0; - u32 i, sg_num = task->task_sg_num; + u32 i, sg_num = task->task_sg_nents; sector_t block_lba; /* * Do starting conversion up from non 512-byte blocksize with @@ -621,13 +615,13 @@ static int iblock_map_task_SG(struct se_task *task) else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) block_lba = task->task_lba; else { - printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" + pr_err("Unsupported SCSI -> BLOCK LBA conversion:" " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); return PYX_TRANSPORT_LU_COMM_FAILURE; } bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); - if (!(bio)) + if (!bio) return ret; ib_req->ib_bio = bio; @@ -636,41 +630,41 @@ static int iblock_map_task_SG(struct se_task *task) * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist * from task->task_sg -> struct scatterlist memory. 
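The loop that follows builds bios straight from the scatterlist; when bio_add_page() cannot take a whole segment, it allocates another bio and retries. A condensed, kernel-flavored sketch of that shape -- not from this patch, heavily simplified (the real code links the bios into a chain, submits them, and unwinds on error), and using the bio field names of this 2011-era tree:

#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

static int demo_map_sg(struct bio_set *bs, struct block_device *bdev,
                       sector_t lba, struct scatterlist *sgl, u32 nents)
{
        struct scatterlist *sg;
        struct bio *bio;
        u32 i;

        bio = bio_alloc_bioset(GFP_NOIO, nents, bs);
        if (!bio)
                return -ENOMEM;
        bio->bi_bdev = bdev;
        bio->bi_sector = lba;           /* pre-3.14 field names */

        for_each_sg(sgl, sg, nents, i) {
                while (bio_add_page(bio, sg_page(sg), sg->length,
                                    sg->offset) != sg->length) {
                        /* bio full: start a fresh one, retry this segment */
                        bio = bio_alloc_bioset(GFP_NOIO, nents - i, bs);
                        if (!bio)
                                return -ENOMEM;
                        bio->bi_bdev = bdev;
                        bio->bi_sector = lba;
                }
                lba += sg->length >> 9; /* block layer counts 512-byte units */
        }
        return 0;
}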
*/ - for_each_sg(task->task_sg, sg, task->task_sg_num, i) { - DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" + for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { + pr_debug("task: %p bio: %p Calling bio_add_page(): page:" " %p len: %u offset: %u\n", task, bio, sg_page(sg), sg->length, sg->offset); again: ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); if (ret != sg->length) { - DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", - bio->bi_sector); - DEBUG_IBLOCK("** task->task_size: %u\n", + pr_debug("*** Set bio->bi_sector: %llu\n", + (unsigned long long)bio->bi_sector); + pr_debug("** task->task_size: %u\n", task->task_size); - DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", + pr_debug("*** bio->bi_max_vecs: %u\n", bio->bi_max_vecs); - DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", + pr_debug("*** bio->bi_vcnt: %u\n", bio->bi_vcnt); bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); - if (!(bio)) + if (!bio) goto fail; tbio = tbio->bi_next = bio; - DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" + pr_debug("-----------------> Added +1 bio: %p to" " list, Going to again\n", bio); goto again; } /* Always in 512 byte units for Linux/Block */ block_lba += sg->length >> IBLOCK_LBA_SHIFT; sg_num--; - DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented" + pr_debug("task: %p bio-add_page() passed!, decremented" " sg_num to %u\n", task, sg_num); - DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" - " to %llu\n", task, block_lba); - DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" + pr_debug("task: %p bio_add_page() passed!, increased lba" + " to %llu\n", task, (unsigned long long)block_lba); + pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" " %u\n", task, bio->bi_vcnt); } @@ -716,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err) /* * Set -EIO if !BIO_UPTODATE and the passed is still err=0 */ - if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) + if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err) err = -EIO; if (err != 0) { - printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," + pr_err("test_bit(BIO_UPTODATE) failed for bio: %p," " err: %d\n", bio, err); /* * Bump the ib_bio_err_cnt and release bio. @@ -731,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err) /* * Wait to complete the task until the last bio as completed. */ - if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) + if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) return; ibr->ib_bio = NULL; transport_complete_task(task, 0); return; } - DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", - task, bio, task->task_lba, bio->bi_sector, err); + pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", + task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); /* * bio_put() will call iblock_bio_destructor() to release the bio back * to ibr->ib_bio_set. @@ -748,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err) /* * Wait to complete the task until the last bio as completed. */ - if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) + if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) return; /* * Return GOOD status for task if zero ib_bio_err_cnt exists. 
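iblock_bio_done() completes the task only when the last outstanding bio finishes, which is what the two atomic_dec_and_test() calls in the hunk above implement. A minimal sketch of that completion-counting idiom -- not from this patch, names invented:

#include <linux/atomic.h>
#include <linux/printk.h>

struct demo_req {
        atomic_t bio_cnt;       /* one count per submitted bio */
        atomic_t bio_err_cnt;
};

static void demo_complete(struct demo_req *req, int good)
{
        pr_debug("req %p done, status %s\n", req, good ? "GOOD" : "BAD");
}

/* Called once per bio, from any context, in any order. */
static void demo_bio_done(struct demo_req *req, int err)
{
        if (err)
                atomic_inc(&req->bio_err_cnt);

        /* Only the completion that drops the count to zero proceeds. */
        if (!atomic_dec_and_test(&req->bio_cnt))
                return;

        demo_complete(req, atomic_read(&req->bio_err_cnt) == 0);
}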
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index 2aa1d27a49b9..a121cd1b6575 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -12,7 +12,6 @@ struct iblock_req { atomic_t ib_bio_cnt; atomic_t ib_bio_err_cnt; struct bio *ib_bio; - struct iblock_dev *ib_dev; } ____cacheline_aligned; #define IBDF_HAS_UDEV_PATH 0x01 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 3342843f8619..1c1b849cd4fb 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -62,7 +62,7 @@ int core_pr_dump_initiator_port( char *buf, u32 size) { - if (!(pr_reg->isid_present_at_reg)) + if (!pr_reg->isid_present_at_reg) return 0; snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]); @@ -95,7 +95,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type) struct se_session *sess = cmd->se_sess; int ret; - if (!(sess)) + if (!sess) return 0; spin_lock(&dev->dev_reservation_lock); @@ -123,7 +123,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd) struct se_session *sess = cmd->se_sess; struct se_portal_group *tpg = sess->se_tpg; - if (!(sess) || !(tpg)) + if (!sess || !tpg) return 0; spin_lock(&dev->dev_reservation_lock); @@ -142,7 +142,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd) dev->dev_res_bin_isid = 0; dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID; } - printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->" + pr_debug("SCSI-2 Released reservation for %s LUN: %u ->" " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(), cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); @@ -159,7 +159,7 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) if ((cmd->t_task_cdb[1] & 0x01) && (cmd->t_task_cdb[1] & 0x02)) { - printk(KERN_ERR "LongIO and Obsolete Bits set, returning" + pr_err("LongIO and Obsolete Bits set, returning" " ILLEGAL_REQUEST\n"); return PYX_TRANSPORT_ILLEGAL_REQUEST; } @@ -167,18 +167,18 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) * This is currently the case for target_core_mod passthrough struct se_cmd * ops */ - if (!(sess) || !(tpg)) + if (!sess || !tpg) return 0; spin_lock(&dev->dev_reservation_lock); if (dev->dev_reserved_node_acl && (dev->dev_reserved_node_acl != sess->se_node_acl)) { - printk(KERN_ERR "SCSI-2 RESERVATION CONFLICT for %s fabric\n", + pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n", tpg->se_tpg_tfo->get_fabric_name()); - printk(KERN_ERR "Original reserver LUN: %u %s\n", + pr_err("Original reserver LUN: %u %s\n", cmd->se_lun->unpacked_lun, dev->dev_reserved_node_acl->initiatorname); - printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u" + pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u" " from %s\n", cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); @@ -192,7 +192,7 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd) dev->dev_res_bin_isid = sess->sess_bin_isid; dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID; } - printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" + pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u" " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun, sess->se_node_acl->initiatorname); @@ -220,10 +220,10 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS); int conflict = 0; - if
(!(se_sess)) + if (!se_sess) return 0; - if (!(crh)) + if (!crh) goto after_crh; pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, @@ -280,7 +280,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd) } if (conflict) { - printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE" + pr_err("Received legacy SPC-2 RESERVE/RELEASE" " while active SPC-3 registrations exist," " returning RESERVATION_CONFLICT\n"); return PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -412,7 +412,7 @@ static int core_scsi3_pr_seq_non_holder( ret = (registered_nexus) ? 0 : 1; break; default: - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" + pr_err("Unknown PERSISTENT_RESERVE_OUT service" " action: 0x%02x\n", cdb[1] & 0x1f); return -EINVAL; } @@ -459,7 +459,7 @@ static int core_scsi3_pr_seq_non_holder( ret = 0; /* Allowed */ break; default: - printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n", + pr_err("Unknown MI Service Action: 0x%02x\n", (cdb[1] & 0x1f)); return -EINVAL; } @@ -481,9 +481,9 @@ static int core_scsi3_pr_seq_non_holder( * Case where the CDB is explicitly allowed in the above switch * statement. */ - if (!(ret) && !(other_cdb)) { + if (!ret && !other_cdb) { #if 0 - printk(KERN_INFO "Allowing explicit CDB: 0x%02x for %s" + pr_debug("Allowing explicit CDB: 0x%02x for %s" " reservation holder\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); #endif @@ -498,7 +498,7 @@ static int core_scsi3_pr_seq_non_holder( /* * Conflict for write exclusive */ - printk(KERN_INFO "%s Conflict for unregistered nexus" + pr_debug("%s Conflict for unregistered nexus" " %s CDB: 0x%02x to %s reservation\n", transport_dump_cmd_direction(cmd), se_sess->se_node_acl->initiatorname, cdb[0], @@ -515,8 +515,8 @@ static int core_scsi3_pr_seq_non_holder( * nexuses to issue CDBs. */ #if 0 - if (!(registered_nexus)) { - printk(KERN_INFO "Allowing implicit CDB: 0x%02x" + if (!registered_nexus) { + pr_debug("Allowing implicit CDB: 0x%02x" " for %s reservation on unregistered" " nexus\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); @@ -531,14 +531,14 @@ static int core_scsi3_pr_seq_non_holder( * allow commands from registered nexuses. */ #if 0 - printk(KERN_INFO "Allowing implicit CDB: 0x%02x for %s" + pr_debug("Allowing implicit CDB: 0x%02x for %s" " reservation\n", cdb[0], core_scsi3_pr_dump_type(pr_reg_type)); #endif return 0; } } - printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%02x" + pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%02x" " for %s reservation\n", transport_dump_cmd_direction(cmd), (registered_nexus) ? "" : "un", se_sess->se_node_acl->initiatorname, cdb[0], @@ -575,7 +575,7 @@ static int core_scsi3_pr_reservation_check( struct se_session *sess = cmd->se_sess; int ret; - if (!(sess)) + if (!sess) return 0; /* * A legacy SPC-2 reservation is being held.
@@ -584,7 +584,7 @@ static int core_scsi3_pr_reservation_check( return core_scsi2_reservation_check(cmd, pr_reg_type); spin_lock(&dev->dev_reservation_lock); - if (!(dev->dev_pr_res_holder)) { + if (!dev->dev_pr_res_holder) { spin_unlock(&dev->dev_reservation_lock); return 0; } @@ -594,7 +594,7 @@ static int core_scsi3_pr_reservation_check( spin_unlock(&dev->dev_reservation_lock); return -EINVAL; } - if (!(dev->dev_pr_res_holder->isid_present_at_reg)) { + if (!dev->dev_pr_res_holder->isid_present_at_reg) { spin_unlock(&dev->dev_reservation_lock); return 0; } @@ -624,15 +624,15 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration( struct t10_pr_registration *pr_reg; pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC); - if (!(pr_reg)) { - printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); + if (!pr_reg) { + pr_err("Unable to allocate struct t10_pr_registration\n"); return NULL; } pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len, GFP_ATOMIC); - if (!(pr_reg->pr_aptpl_buf)) { - printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n"); + if (!pr_reg->pr_aptpl_buf) { + pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n"); kmem_cache_free(t10_pr_reg_cache, pr_reg); return NULL; } @@ -692,12 +692,12 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( */ pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid, sa_res_key, all_tg_pt, aptpl); - if (!(pr_reg)) + if (!pr_reg) return NULL; /* * Return pointer to pr_reg for ALL_TG_PT=0 */ - if (!(all_tg_pt)) + if (!all_tg_pt) return pr_reg; /* * Create list of matching SCSI Initiator Port registrations @@ -717,7 +717,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( * that have not been made explicit via a ConfigFS * MappedLUN group for the SCSI Initiator Node ACL.
*/ - if (!(deve_tmp->se_lun_acl)) + if (!deve_tmp->se_lun_acl) continue; nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl; @@ -751,7 +751,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( */ ret = core_scsi3_lunacl_depend_item(deve_tmp); if (ret < 0) { - printk(KERN_ERR "core_scsi3_lunacl_depend" + pr_err("core_scsi3_lunacl_depend" "_item() failed\n"); atomic_dec(&port->sep_tg_pt_ref_cnt); smp_mb__after_atomic_dec(); @@ -769,7 +769,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration( pr_reg_atp = __core_scsi3_do_alloc_registration(dev, nacl_tmp, deve_tmp, NULL, sa_res_key, all_tg_pt, aptpl); - if (!(pr_reg_atp)) { + if (!pr_reg_atp) { atomic_dec(&port->sep_tg_pt_ref_cnt); smp_mb__after_atomic_dec(); atomic_dec(&deve_tmp->pr_ref_count); @@ -817,14 +817,14 @@ int core_scsi3_alloc_aptpl_registration( { struct t10_pr_registration *pr_reg; - if (!(i_port) || !(t_port) || !(sa_res_key)) { - printk(KERN_ERR "Illegal parameters for APTPL registration\n"); + if (!i_port || !t_port || !sa_res_key) { + pr_err("Illegal parameters for APTPL registration\n"); return -EINVAL; } pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL); - if (!(pr_reg)) { - printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n"); + if (!pr_reg) { + pr_err("Unable to allocate struct t10_pr_registration\n"); return -ENOMEM; } pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); @@ -869,7 +869,7 @@ int core_scsi3_alloc_aptpl_registration( pr_reg->pr_res_holder = res_holder; list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list); - printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from" + pr_debug("SPC-3 PR APTPL Successfully added registration%s from" " metadata\n", (res_holder) ? "+reservation" : ""); return 0; } @@ -891,12 +891,12 @@ static void core_scsi3_aptpl_reserve( dev->dev_pr_res_holder = pr_reg; spin_unlock(&dev->dev_reservation_lock); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created" + pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created" " new reservation holder TYPE: %s ALL_TG_PT: %d\n", tpg->se_tpg_tfo->get_fabric_name(), core_scsi3_pr_dump_type(pr_reg->pr_res_type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); - printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", + pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname, (prf_isid) ? &i_buf[0] : ""); } @@ -936,7 +936,7 @@ static int __core_scsi3_check_aptpl_registration( spin_lock(&pr_tmpl->aptpl_reg_lock); list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, pr_reg_aptpl_list) { - if (!(strcmp(pr_reg->pr_iport, i_port)) && + if (!strcmp(pr_reg->pr_iport, i_port) && (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && !(strcmp(pr_reg->pr_tport, t_port)) && (pr_reg->pr_reg_tpgt == tpgt) && @@ -1006,19 +1006,19 @@ static void __core_scsi3_dump_registration( prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator" + pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator" " Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ? "_AND_MOVE" : (register_type == 1) ? "_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname, (prf_isid) ? 
i_buf : ""); - printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", + pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg)); - printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" + pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" " Port(s)\n", tfo->get_fabric_name(), (pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE", dev->transport->name); - printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" + pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" " 0x%08x APTPL: %d\n", tfo->get_fabric_name(), pr_reg->pr_res_key, pr_reg->pr_res_generation, pr_reg->pr_reg_aptpl); @@ -1062,7 +1062,7 @@ static void __core_scsi3_add_registration( /* * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE. */ - if (!(pr_reg->pr_reg_all_tg_pt) || (register_move)) + if (!pr_reg->pr_reg_all_tg_pt || register_move) return; /* * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1 @@ -1106,7 +1106,7 @@ static int core_scsi3_alloc_registration( pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid, sa_res_key, all_tg_pt, aptpl); - if (!(pr_reg)) + if (!pr_reg) return -EPERM; __core_scsi3_add_registration(dev, nacl, pr_reg, @@ -1137,7 +1137,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( * If this registration does NOT contain a fabric provided * ISID, then we have found a match. */ - if (!(pr_reg->isid_present_at_reg)) { + if (!pr_reg->isid_present_at_reg) { /* * Determine if this SCSI device server requires that * SCSI Intiatior TransportID w/ ISIDs is enforced @@ -1157,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg( * SCSI Initiator Port TransportIDs, then we expect a valid * matching ISID to be provided by the local SCSI Initiator Port. */ - if (!(isid)) + if (!isid) continue; if (strcmp(isid, pr_reg->pr_reg_isid)) continue; @@ -1206,7 +1206,7 @@ static int core_scsi3_check_implict_release( spin_lock(&dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; - if (!(pr_res_holder)) { + if (!pr_res_holder) { spin_unlock(&dev->dev_reservation_lock); return ret; } @@ -1236,7 +1236,7 @@ static int core_scsi3_check_implict_release( (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname, pr_reg->pr_reg_nacl->initiatorname)) && (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) { - printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1" + pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1" " UNREGISTER while existing reservation with matching" " key 0x%016Lx is present from another SCSI Initiator" " Port\n", pr_reg->pr_res_key); @@ -1283,25 +1283,25 @@ static void __core_scsi3_free_registration( */ while (atomic_read(&pr_reg->pr_res_holders) != 0) { spin_unlock(&pr_tmpl->registration_lock); - printk("SPC-3 PR [%s] waiting for pr_res_holders\n", + pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n", tfo->get_fabric_name()); cpu_relax(); spin_lock(&pr_tmpl->registration_lock); } - printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator" + pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator" " Node: %s%s\n", tfo->get_fabric_name(), pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); - printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" + pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target" " Port(s)\n", tfo->get_fabric_name(), (pr_reg->pr_reg_all_tg_pt) ? 
"ALL" : "SINGLE", dev->transport->name); - printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" + pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:" " 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key, pr_reg->pr_res_generation); - if (!(preempt_and_abort_list)) { + if (!preempt_and_abort_list) { pr_reg->pr_reg_deve = NULL; pr_reg->pr_reg_nacl = NULL; kfree(pr_reg->pr_aptpl_buf); @@ -1430,7 +1430,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) /* * For nacl->dynamic_node_acl=1 */ - if (!(lun_acl)) + if (!lun_acl) return 0; nacl = lun_acl->se_lun_nacl; @@ -1448,7 +1448,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve) /* * For nacl->dynamic_node_acl=1 */ - if (!(lun_acl)) { + if (!lun_acl) { atomic_dec(&se_deve->pr_ref_count); smp_mb__after_atomic_dec(); return; @@ -1500,8 +1500,8 @@ static int core_scsi3_decode_spec_i_port( * processing in the loop of tid_dest_list below. */ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); - if (!(tidh_new)) { - printk(KERN_ERR "Unable to allocate tidh_new\n"); + if (!tidh_new) { + pr_err("Unable to allocate tidh_new\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } INIT_LIST_HEAD(&tidh_new->dest_list); @@ -1512,7 +1512,7 @@ static int core_scsi3_decode_spec_i_port( local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, local_se_deve, l_isid, sa_res_key, all_tg_pt, aptpl); - if (!(local_pr_reg)) { + if (!local_pr_reg) { kfree(tidh_new); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -1537,7 +1537,7 @@ static int core_scsi3_decode_spec_i_port( tpdl |= buf[27] & 0xff; if ((tpdl + 28) != cmd->data_length) { - printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header" + pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" " does not equal CDB data_length: %u\n", tpdl, cmd->data_length); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -1557,13 +1557,13 @@ static int core_scsi3_decode_spec_i_port( spin_lock(&dev->se_port_lock); list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) { tmp_tpg = tmp_port->sep_tpg; - if (!(tmp_tpg)) + if (!tmp_tpg) continue; tmp_tf_ops = tmp_tpg->se_tpg_tfo; - if (!(tmp_tf_ops)) + if (!tmp_tf_ops) continue; - if (!(tmp_tf_ops->get_fabric_proto_ident) || - !(tmp_tf_ops->tpg_parse_pr_out_transport_id)) + if (!tmp_tf_ops->get_fabric_proto_ident || + !tmp_tf_ops->tpg_parse_pr_out_transport_id) continue; /* * Look for the matching proto_ident provided by @@ -1577,7 +1577,7 @@ static int core_scsi3_decode_spec_i_port( i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id( tmp_tpg, (const char *)ptr, &tid_len, &iport_ptr); - if (!(i_str)) + if (!i_str) continue; atomic_inc(&tmp_tpg->tpg_pr_ref_count); @@ -1586,7 +1586,7 @@ static int core_scsi3_decode_spec_i_port( ret = core_scsi3_tpg_depend_item(tmp_tpg); if (ret != 0) { - printk(KERN_ERR " core_scsi3_tpg_depend_item()" + pr_err(" core_scsi3_tpg_depend_item()" " for tmp_tpg\n"); atomic_dec(&tmp_tpg->tpg_pr_ref_count); smp_mb__after_atomic_dec(); @@ -1607,7 +1607,7 @@ static int core_scsi3_decode_spec_i_port( } spin_unlock_bh(&tmp_tpg->acl_node_lock); - if (!(dest_node_acl)) { + if (!dest_node_acl) { core_scsi3_tpg_undepend_item(tmp_tpg); spin_lock(&dev->se_port_lock); continue; @@ -1615,7 +1615,7 @@ static int core_scsi3_decode_spec_i_port( ret = core_scsi3_nodeacl_depend_item(dest_node_acl); if (ret != 0) { - printk(KERN_ERR "configfs_depend_item() failed" + pr_err("configfs_depend_item() failed" " for dest_node_acl->acl_group\n"); 
atomic_dec(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_dec(); @@ -1625,7 +1625,7 @@ static int core_scsi3_decode_spec_i_port( } dest_tpg = tmp_tpg; - printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:" + pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:" " %s Port RTPI: %hu\n", dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, dest_rtpi); @@ -1635,20 +1635,20 @@ static int core_scsi3_decode_spec_i_port( } spin_unlock(&dev->se_port_lock); - if (!(dest_tpg)) { - printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate" + if (!dest_tpg) { + pr_err("SPC-3 PR SPEC_I_PT: Unable to locate" " dest_tpg\n"); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; goto out; } #if 0 - printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" + pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u" " tid_len: %d for %s + %s\n", dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length, tpdl, tid_len, i_str, iport_ptr); #endif if (tid_len > tpdl) { - printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:" + pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:" " %u for Transport ID: %s\n", tid_len, ptr); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); @@ -1662,8 +1662,8 @@ static int core_scsi3_decode_spec_i_port( */ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, dest_rtpi); - if (!(dest_se_deve)) { - printk(KERN_ERR "Unable to locate %s dest_se_deve" + if (!dest_se_deve) { + pr_err("Unable to locate %s dest_se_deve" " from destination RTPI: %hu\n", dest_tpg->se_tpg_tfo->get_fabric_name(), dest_rtpi); @@ -1676,7 +1676,7 @@ static int core_scsi3_decode_spec_i_port( ret = core_scsi3_lunacl_depend_item(dest_se_deve); if (ret < 0) { - printk(KERN_ERR "core_scsi3_lunacl_depend_item()" + pr_err("core_scsi3_lunacl_depend_item()" " failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); smp_mb__after_atomic_dec(); @@ -1686,7 +1686,7 @@ static int core_scsi3_decode_spec_i_port( goto out; } #if 0 - printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s" + pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s" " dest_se_deve mapped_lun: %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); @@ -1714,8 +1714,8 @@ static int core_scsi3_decode_spec_i_port( */ tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL); - if (!(tidh_new)) { - printk(KERN_ERR "Unable to allocate tidh_new\n"); + if (!tidh_new) { + pr_err("Unable to allocate tidh_new\n"); core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); @@ -1746,7 +1746,7 @@ static int core_scsi3_decode_spec_i_port( dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, all_tg_pt, aptpl); - if (!(dest_pr_reg)) { + if (!dest_pr_reg) { core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); @@ -1795,7 +1795,7 @@ static int core_scsi3_decode_spec_i_port( __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, dest_pr_reg, 0, 0); - printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully" + pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully" " registered Transport ID for Node: %s%s Mapped LUN:" " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(), dest_node_acl->initiatorname, (prf_isid) ? 
@@ -1923,7 +1923,7 @@ static int __core_scsi3_update_aptpl_buf( } if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { - printk(KERN_ERR "Unable to update renaming" + pr_err("Unable to update renaming" " APTPL metadata\n"); spin_unlock(&su_dev->t10_pr.registration_lock); return -EMSGSIZE; @@ -1941,7 +1941,7 @@ static int __core_scsi3_update_aptpl_buf( lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { - printk(KERN_ERR "Unable to update renaming" + pr_err("Unable to update renaming" " APTPL metadata\n"); spin_unlock(&su_dev->t10_pr.registration_lock); return -EMSGSIZE; @@ -1951,7 +1951,7 @@ static int __core_scsi3_update_aptpl_buf( } spin_unlock(&su_dev->t10_pr.registration_lock); - if (!(reg_count)) + if (!reg_count) len += sprintf(buf+len, "No Registrations or Reservations"); return 0; @@ -1993,7 +1993,7 @@ static int __core_scsi3_write_aptpl_to_file( memset(path, 0, 512); if (strlen(&wwn->unit_serial[0]) >= 512) { - printk(KERN_ERR "WWN value for struct se_device does not fit" + pr_err("WWN value for struct se_device does not fit" " into path buffer\n"); return -EMSGSIZE; } @@ -2001,13 +2001,13 @@ static int __core_scsi3_write_aptpl_to_file( snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]); file = filp_open(path, flags, 0600); if (IS_ERR(file) || !file || !file->f_dentry) { - printk(KERN_ERR "filp_open(%s) for APTPL metadata" + pr_err("filp_open(%s) for APTPL metadata" " failed\n", path); return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT); } iov[0].iov_base = &buf[0]; - if (!(pr_aptpl_buf_len)) + if (!pr_aptpl_buf_len) iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */ else iov[0].iov_len = pr_aptpl_buf_len; @@ -2018,7 +2018,7 @@ static int __core_scsi3_write_aptpl_to_file( set_fs(old_fs); if (ret < 0) { - printk("Error writing APTPL metadata file: %s\n", path); + pr_debug("Error writing APTPL metadata file: %s\n", path); filp_close(file, NULL); return -EIO; } @@ -2038,7 +2038,7 @@ static int core_scsi3_update_and_write_aptpl( /* * Can be called with a NULL pointer from PROUT service action CLEAR */ - if (!(in_buf)) { + if (!in_buf) { memset(null_buf, 0, 64); buf = &null_buf[0]; /* @@ -2088,8 +2088,8 @@ static int core_scsi3_emulate_pro_register( unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL; int pr_holder = 0, ret = 0, type; - if (!(se_sess) || !(se_lun)) { - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } se_tpg = se_sess->se_tpg; @@ -2105,19 +2105,19 @@ static int core_scsi3_emulate_pro_register( * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47 */ pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg_e)) { + if (!pr_reg_e) { if (res_key) { - printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero" + pr_warn("SPC-3 PR: Reservation Key non-zero" " for SA REGISTER, returning CONFLICT\n"); return PYX_TRANSPORT_RESERVATION_CONFLICT; } /* * Do nothing but return GOOD status. 
*/ - if (!(sa_res_key)) + if (!sa_res_key) return PYX_TRANSPORT_SENT_TO_TRANSPORT; - if (!(spec_i_pt)) { + if (!spec_i_pt) { /* * Perform the Service Action REGISTER on the Initiator * Port Endpoint that the PRO was received from on the @@ -2128,7 +2128,7 @@ static int core_scsi3_emulate_pro_register( sa_res_key, all_tg_pt, aptpl, ignore_key, 0); if (ret != 0) { - printk(KERN_ERR "Unable to allocate" + pr_err("Unable to allocate" " struct t10_pr_registration\n"); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -2149,10 +2149,10 @@ static int core_scsi3_emulate_pro_register( /* * Nothing left to do for the APTPL=0 case. */ - if (!(aptpl)) { + if (!aptpl) { pr_tmpl->pr_aptpl_active = 0; core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); - printk("SPC-3 PR: Set APTPL Bit Deactivated for" + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER\n"); return 0; } @@ -2167,9 +2167,9 @@ static int core_scsi3_emulate_pro_register( ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) { + if (!ret) { pr_tmpl->pr_aptpl_active = 1; - printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); + pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n"); } core_scsi3_put_pr_reg(pr_reg); @@ -2181,9 +2181,9 @@ static int core_scsi3_emulate_pro_register( pr_reg = pr_reg_e; type = pr_reg->pr_res_type; - if (!(ignore_key)) { + if (!ignore_key) { if (res_key != pr_reg->pr_res_key) { - printk(KERN_ERR "SPC-3 PR REGISTER: Received" + pr_err("SPC-3 PR REGISTER: Received" " res_key: 0x%016Lx does not match" " existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, @@ -2193,7 +2193,7 @@ static int core_scsi3_emulate_pro_register( } } if (spec_i_pt) { - printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT" + pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT" " set while sa_res_key=0\n"); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -2203,7 +2203,7 @@ static int core_scsi3_emulate_pro_register( * must also set ALL_TG_PT=1 in the incoming PROUT. */ if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) { - printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1" + pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1" " registration exists, but ALL_TG_PT=1 bit not" " present in received PROUT\n"); core_scsi3_put_pr_reg(pr_reg); @@ -2215,8 +2215,8 @@ static int core_scsi3_emulate_pro_register( if (aptpl) { pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL); - if (!(pr_aptpl_buf)) { - printk(KERN_ERR "Unable to allocate" + if (!pr_aptpl_buf) { + pr_err("Unable to allocate" " pr_aptpl_buf\n"); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_LU_COMM_FAILURE; @@ -2227,7 +2227,7 @@ static int core_scsi3_emulate_pro_register( * Nexus sa_res_key=1 Change Reservation Key for registered I_T * Nexus. 
*/ - if (!(sa_res_key)) { + if (!sa_res_key) { pr_holder = core_scsi3_check_implict_release( cmd->se_dev, pr_reg); if (pr_holder < 0) { @@ -2246,7 +2246,7 @@ static int core_scsi3_emulate_pro_register( &pr_tmpl->registration_list, pr_reg_list) { - if (!(pr_reg_p->pr_reg_all_tg_pt)) + if (!pr_reg_p->pr_reg_all_tg_pt) continue; if (pr_reg_p->pr_res_key != res_key) @@ -2295,10 +2295,10 @@ static int core_scsi3_emulate_pro_register( } spin_unlock(&pr_tmpl->registration_lock); - if (!(aptpl)) { + if (!aptpl) { pr_tmpl->pr_aptpl_active = 0; core_scsi3_update_and_write_aptpl(dev, NULL, 0); - printk("SPC-3 PR: Set APTPL Bit Deactivated" + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" " for UNREGISTER\n"); return 0; } @@ -2306,9 +2306,9 @@ static int core_scsi3_emulate_pro_register( ret = core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) { + if (!ret) { pr_tmpl->pr_aptpl_active = 1; - printk("SPC-3 PR: Set APTPL Bit Activated" + pr_debug("SPC-3 PR: Set APTPL Bit Activated" " for UNREGISTER\n"); } @@ -2323,18 +2323,18 @@ static int core_scsi3_emulate_pro_register( pr_reg->pr_res_generation = core_scsi3_pr_generation( cmd->se_dev); pr_reg->pr_res_key = sa_res_key; - printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation" + pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation" " Key for %s to: 0x%016Lx PRgeneration:" " 0x%08x\n", cmd->se_tfo->get_fabric_name(), (ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "", pr_reg->pr_reg_nacl->initiatorname, pr_reg->pr_res_key, pr_reg->pr_res_generation); - if (!(aptpl)) { + if (!aptpl) { pr_tmpl->pr_aptpl_active = 0; core_scsi3_update_and_write_aptpl(dev, NULL, 0); core_scsi3_put_pr_reg(pr_reg); - printk("SPC-3 PR: Set APTPL Bit Deactivated" + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated" " for REGISTER\n"); return 0; } @@ -2342,9 +2342,9 @@ static int core_scsi3_emulate_pro_register( ret = core_scsi3_update_and_write_aptpl(dev, &pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) { + if (!ret) { pr_tmpl->pr_aptpl_active = 1; - printk("SPC-3 PR: Set APTPL Bit Activated" + pr_debug("SPC-3 PR: Set APTPL Bit Activated" " for REGISTER\n"); } @@ -2395,8 +2395,8 @@ static int core_scsi3_pro_reserve( memset(i_buf, 0, PR_REG_ISID_ID_LEN); - if (!(se_sess) || !(se_lun)) { - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } se_tpg = se_sess->se_tpg; @@ -2406,8 +2406,8 @@ static int core_scsi3_pro_reserve( */ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate" + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for RESERVE\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -2421,7 +2421,7 @@ static int core_scsi3_pro_reserve( * registered with the logical unit for the I_T nexus; and */ if (res_key != pr_reg->pr_res_key) { - printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx" + pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx" " does not match existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); @@ -2438,7 +2438,7 @@ static int core_scsi3_pro_reserve( * and that persistent reservation has a scope of LU_SCOPE. 
*/ if (scope != PR_SCOPE_LU_SCOPE) { - printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); + pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -2462,7 +2462,7 @@ static int core_scsi3_pro_reserve( */ if (pr_res_holder != pr_reg) { struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; - printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" + pr_err("SPC-3 PR: Attempted RESERVE from" " [%s]: %s while reservation already held by" " [%s]: %s, returning RESERVATION_CONFLICT\n", cmd->se_tfo->get_fabric_name(), @@ -2484,7 +2484,7 @@ static int core_scsi3_pro_reserve( if ((pr_res_holder->pr_res_type != type) || (pr_res_holder->pr_res_scope != scope)) { struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; - printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from" + pr_err("SPC-3 PR: Attempted RESERVE from" " [%s]: %s trying to change TYPE and/or SCOPE," " while reservation already held by [%s]: %s," " returning RESERVATION_CONFLICT\n", @@ -2522,11 +2522,11 @@ static int core_scsi3_pro_reserve( prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0], PR_REG_ISID_ID_LEN); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new" + pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new" " reservation holder TYPE: %s ALL_TG_PT: %d\n", cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); - printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n", + pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n", cmd->se_tfo->get_fabric_name(), se_sess->se_node_acl->initiatorname, (prf_isid) ? &i_buf[0] : ""); @@ -2536,8 +2536,8 @@ static int core_scsi3_pro_reserve( ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" + if (!ret) + pr_debug("SPC-3 PR: Updated APTPL metadata" " for RESERVE\n"); } @@ -2564,7 +2564,7 @@ static int core_scsi3_emulate_pro_reserve( ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key); break; default: - printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:" + pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:" " 0x%02x\n", type); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -2593,12 +2593,12 @@ static void __core_scsi3_complete_pro_release( */ dev->dev_pr_res_holder = NULL; - printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared" + pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared" " reservation holder TYPE: %s ALL_TG_PT: %d\n", tfo->get_fabric_name(), (explict) ? "explict" : "implict", core_scsi3_pr_dump_type(pr_reg->pr_res_type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); - printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n", + pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n", tfo->get_fabric_name(), se_nacl->initiatorname, (prf_isid) ? 
&i_buf[0] : ""); /* @@ -2620,16 +2620,16 @@ static int core_scsi3_emulate_pro_release( struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr; int ret, all_reg = 0; - if (!(se_sess) || !(se_lun)) { - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } /* * Locate the existing *pr_reg via struct se_node_acl pointers */ pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate" + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for RELEASE\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -2647,7 +2647,7 @@ static int core_scsi3_emulate_pro_release( */ spin_lock(&dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; - if (!(pr_res_holder)) { + if (!pr_res_holder) { /* * No persistent reservation, return GOOD status. */ @@ -2684,7 +2684,7 @@ static int core_scsi3_emulate_pro_release( * that is registered with the logical unit for the I_T nexus; */ if (res_key != pr_reg->pr_res_key) { - printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx" + pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx" " does not match existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg->pr_res_key); spin_unlock(&dev->dev_reservation_lock); @@ -2700,7 +2700,7 @@ static int core_scsi3_emulate_pro_release( if ((pr_res_holder->pr_res_type != type) || (pr_res_holder->pr_res_scope != scope)) { struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl; - printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release" + pr_err("SPC-3 PR RELEASE: Attempted to release" " reservation from [%s]: %s with different TYPE " "and/or SCOPE while reservation already held by" " [%s]: %s, returning RESERVATION_CONFLICT\n", @@ -2767,8 +2767,8 @@ write_aptpl: ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); + if (!ret) + pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n"); } core_scsi3_put_pr_reg(pr_reg); @@ -2791,8 +2791,8 @@ static int core_scsi3_emulate_pro_clear( */ pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg_n)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate" + if (!pr_reg_n) { + pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for CLEAR\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -2808,7 +2808,7 @@ static int core_scsi3_emulate_pro_clear( * that is registered with the logical unit for the I_T nexus. */ if (res_key != pr_reg_n->pr_res_key) { - printk(KERN_ERR "SPC-3 PR REGISTER: Received" + pr_err("SPC-3 PR REGISTER: Received" " res_key: 0x%016Lx does not match" " existing SA REGISTER res_key:" " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); @@ -2845,18 +2845,18 @@ static int core_scsi3_emulate_pro_clear( * command with CLEAR service action was received, with the * additional sense code set to RESERVATIONS PREEMPTED. 
*/ - if (!(calling_it_nexus)) + if (!calling_it_nexus) core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); } spin_unlock(&pr_tmpl->registration_lock); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n", + pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n", cmd->se_tfo->get_fabric_name()); if (pr_tmpl->pr_aptpl_active) { core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); - printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata" + pr_debug("SPC-3 PR: Updated APTPL metadata" " for CLEAR\n"); } @@ -2895,12 +2895,12 @@ static void __core_scsi3_complete_pro_preempt( pr_reg->pr_res_type = type; pr_reg->pr_res_scope = scope; - printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new" + pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new" " reservation holder TYPE: %s ALL_TG_PT: %d\n", tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", core_scsi3_pr_dump_type(type), (pr_reg->pr_reg_all_tg_pt) ? 1 : 0); - printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", + pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "", nacl->initiatorname, (prf_isid) ? &i_buf[0] : ""); /* @@ -2926,7 +2926,7 @@ static void core_scsi3_release_preempt_and_abort( if (pr_reg_holder == pr_reg) continue; if (pr_reg->pr_res_holder) { - printk(KERN_WARNING "pr_reg->pr_res_holder still set\n"); + pr_warn("pr_reg->pr_res_holder still set\n"); continue; } @@ -2971,14 +2971,14 @@ static int core_scsi3_pro_preempt( int all_reg = 0, calling_it_nexus = 0, released_regs = 0; int prh_type = 0, prh_scope = 0, ret; - if (!(se_sess)) + if (!se_sess) return PYX_TRANSPORT_LU_COMM_FAILURE; se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun]; pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg_n)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate" + if (!pr_reg_n) { + pr_err("SPC-3 PR: Unable to locate" " PR_REGISTERED *pr_reg for PREEMPT%s\n", (abort) ? "_AND_ABORT" : ""); return PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -2988,7 +2988,7 @@ static int core_scsi3_pro_preempt( return PYX_TRANSPORT_RESERVATION_CONFLICT; } if (scope != PR_SCOPE_LU_SCOPE) { - printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); + pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg_n); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -3001,7 +3001,7 @@ static int core_scsi3_pro_preempt( (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) all_reg = 1; - if (!(all_reg) && !(sa_res_key)) { + if (!all_reg && !sa_res_key) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -3015,7 +3015,7 @@ static int core_scsi3_pro_preempt( * server shall perform a preempt by doing the following in an * uninterrupted series of actions. (See below..) */ - if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) { + if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) { /* * No existing or SA Reservation Key matching reservations.. * @@ -3042,7 +3042,7 @@ static int core_scsi3_pro_preempt( * was received, with the additional sense code set * to REGISTRATIONS PREEMPTED. 
*/ - if (!(all_reg)) { + if (!all_reg) { if (pr_reg->pr_res_key != sa_res_key) continue; @@ -3082,7 +3082,7 @@ static int core_scsi3_pro_preempt( NULL, 0); released_regs++; } - if (!(calling_it_nexus)) + if (!calling_it_nexus) core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED); @@ -3095,7 +3095,7 @@ static int core_scsi3_pro_preempt( * registered reservation key, then the device server shall * complete the command with RESERVATION CONFLICT status. */ - if (!(released_regs)) { + if (!released_regs) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -3120,8 +3120,8 @@ static int core_scsi3_pro_preempt( ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk(KERN_INFO "SPC-3 PR: Updated APTPL" + if (!ret) + pr_debug("SPC-3 PR: Updated APTPL" " metadata for PREEMPT%s\n", (abort) ? "_AND_ABORT" : ""); } @@ -3256,8 +3256,8 @@ static int core_scsi3_pro_preempt( ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &pr_reg_n->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk("SPC-3 PR: Updated APTPL metadata for PREEMPT" + if (!ret) + pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT" "%s\n", (abort) ? "_AND_ABORT" : ""); } @@ -3287,7 +3287,7 @@ static int core_scsi3_emulate_pro_preempt( res_key, sa_res_key, abort); break; default: - printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s" + pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s" " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -3321,8 +3321,8 @@ static int core_scsi3_emulate_pro_register_and_move( unsigned short rtpi; unsigned char proto_ident; - if (!(se_sess) || !(se_lun)) { - printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + if (!se_sess || !se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } memset(dest_iport, 0, 64); @@ -3338,8 +3338,8 @@ static int core_scsi3_emulate_pro_register_and_move( */ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); - if (!(pr_reg)) { - printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED" + if (!pr_reg) { + pr_err("SPC-3 PR: Unable to locate PR_REGISTERED" " *pr_reg for REGISTER_AND_MOVE\n"); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -3348,7 +3348,7 @@ static int core_scsi3_emulate_pro_register_and_move( * provided during this initiator's I_T nexus registration. 
*/ if (res_key != pr_reg->pr_res_key) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received" + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received" " res_key: 0x%016Lx does not match existing SA REGISTER" " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); core_scsi3_put_pr_reg(pr_reg); @@ -3357,8 +3357,8 @@ static int core_scsi3_emulate_pro_register_and_move( /* * The service action reservation key needs to be non-zero */ - if (!(sa_res_key)) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero" + if (!sa_res_key) { + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero" " sa_res_key\n"); core_scsi3_put_pr_reg(pr_reg); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -3380,7 +3380,7 @@ static int core_scsi3_emulate_pro_register_and_move( buf = NULL; if ((tid_len + 24) != cmd->data_length) { - printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header" + pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header" " does not equal CDB data_length: %u\n", tid_len, cmd->data_length); core_scsi3_put_pr_reg(pr_reg); @@ -3392,10 +3392,10 @@ static int core_scsi3_emulate_pro_register_and_move( if (se_port->sep_rtpi != rtpi) continue; dest_se_tpg = se_port->sep_tpg; - if (!(dest_se_tpg)) + if (!dest_se_tpg) continue; dest_tf_ops = dest_se_tpg->se_tpg_tfo; - if (!(dest_tf_ops)) + if (!dest_tf_ops) continue; atomic_inc(&dest_se_tpg->tpg_pr_ref_count); @@ -3404,7 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move( ret = core_scsi3_tpg_depend_item(dest_se_tpg); if (ret != 0) { - printk(KERN_ERR "core_scsi3_tpg_depend_item() failed" + pr_err("core_scsi3_tpg_depend_item() failed" " for dest_se_tpg\n"); atomic_dec(&dest_se_tpg->tpg_pr_ref_count); smp_mb__after_atomic_dec(); @@ -3417,8 +3417,8 @@ static int core_scsi3_emulate_pro_register_and_move( } spin_unlock(&dev->se_port_lock); - if (!(dest_se_tpg) || (!dest_tf_ops)) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + if (!dest_se_tpg || !dest_tf_ops) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " fabric ops from Relative Target Port Identifier:" " %hu\n", rtpi); core_scsi3_put_pr_reg(pr_reg); @@ -3428,11 +3428,11 @@ static int core_scsi3_emulate_pro_register_and_move( buf = transport_kmap_first_data_page(cmd); proto_ident = (buf[24] & 0x0f); #if 0 - printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" " 0x%02x\n", proto_ident); #endif if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received" + pr_err("SPC-3 PR REGISTER_AND_MOVE: Received" " proto_ident: 0x%02x does not match ident: 0x%02x" " from fabric: %s\n", proto_ident, dest_tf_ops->get_fabric_proto_ident(dest_se_tpg), @@ -3441,7 +3441,7 @@ static int core_scsi3_emulate_pro_register_and_move( goto out; } if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not" + pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not" " containing a valid tpg_parse_pr_out_transport_id" " function pointer\n"); ret = PYX_TRANSPORT_LU_COMM_FAILURE; goto out; } initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg, (const char *)&buf[24], &tmp_tid_len, &iport_ptr); - if (!(initiator_str)) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate" + if (!initiator_str) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate" " initiator_str from Transport
ID\n"); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; goto out; @@ -3459,7 +3459,7 @@ static int core_scsi3_emulate_pro_register_and_move( transport_kunmap_first_data_page(cmd); buf = NULL; - printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s" + pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" " %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ? "port" : "device", initiator_str, (iport_ptr != NULL) ? iport_ptr : ""); @@ -3474,18 +3474,18 @@ static int core_scsi3_emulate_pro_register_and_move( pr_reg_nacl = pr_reg->pr_reg_nacl; matching_iname = (!strcmp(initiator_str, pr_reg_nacl->initiatorname)) ? 1 : 0; - if (!(matching_iname)) + if (!matching_iname) goto after_iport_check; - if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" + if (!iport_ptr || !pr_reg->isid_present_at_reg) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s" " matches: %s on received I_T Nexus\n", initiator_str, pr_reg_nacl->initiatorname); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; goto out; } - if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) { - printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" + if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) { + pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s" " matches: %s %s on received I_T Nexus\n", initiator_str, iport_ptr, pr_reg_nacl->initiatorname, pr_reg->pr_reg_isid); @@ -3505,8 +3505,8 @@ after_iport_check: } spin_unlock_bh(&dest_se_tpg->acl_node_lock); - if (!(dest_node_acl)) { - printk(KERN_ERR "Unable to locate %s dest_node_acl for" + if (!dest_node_acl) { + pr_err("Unable to locate %s dest_node_acl for" " TransportID%s\n", dest_tf_ops->get_fabric_name(), initiator_str); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; @@ -3514,7 +3514,7 @@ after_iport_check: } ret = core_scsi3_nodeacl_depend_item(dest_node_acl); if (ret != 0) { - printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for" + pr_err("core_scsi3_nodeacl_depend_item() for" " dest_node_acl\n"); atomic_dec(&dest_node_acl->acl_pr_ref_count); smp_mb__after_atomic_dec(); @@ -3523,7 +3523,7 @@ after_iport_check: goto out; } #if 0 - printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:" " %s from TransportID\n", dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname); #endif @@ -3532,8 +3532,8 @@ after_iport_check: * PORT IDENTIFIER. 
*/ dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi); - if (!(dest_se_deve)) { - printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:" + if (!dest_se_deve) { + pr_err("Unable to locate %s dest_se_deve from RTPI:" " %hu\n", dest_tf_ops->get_fabric_name(), rtpi); ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST; goto out; @@ -3541,7 +3541,7 @@ after_iport_check: ret = core_scsi3_lunacl_depend_item(dest_se_deve); if (ret < 0) { - printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n"); + pr_err("core_scsi3_lunacl_depend_item() failed\n"); atomic_dec(&dest_se_deve->pr_ref_count); smp_mb__after_atomic_dec(); dest_se_deve = NULL; @@ -3549,7 +3549,7 @@ after_iport_check: goto out; } #if 0 - printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN" " ACL for dest_se_deve->mapped_lun: %u\n", dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname, dest_se_deve->mapped_lun); @@ -3560,8 +3560,8 @@ after_iport_check: */ spin_lock(&dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; - if (!(pr_res_holder)) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation" + if (!pr_res_holder) { + pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation" " currently held\n"); spin_unlock(&dev->dev_reservation_lock); ret = PYX_TRANSPORT_INVALID_CDB_FIELD; @@ -3574,7 +3574,7 @@ after_iport_check: * Register behaviors for a REGISTER AND MOVE service action */ if (pr_res_holder != pr_reg) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T" + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T" " Nexus is not reservation holder\n"); spin_unlock(&dev->dev_reservation_lock); ret = PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -3591,7 +3591,7 @@ after_iport_check: */ if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) || (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) { - printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move" + pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move" " reservation for type: %s\n", core_scsi3_pr_dump_type(pr_res_holder->pr_res_type)); spin_unlock(&dev->dev_reservation_lock); @@ -3626,7 +3626,7 @@ after_iport_check: */ dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, iport_ptr); - if (!(dest_pr_reg)) { + if (!dest_pr_reg) { ret = core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_se_deve, iport_ptr, sa_res_key, 0, aptpl, 2, 1); @@ -3659,16 +3659,16 @@ after_iport_check: /* * Increment PRGeneration for existing registrations.. */ - if (!(new_reg)) + if (!new_reg) dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++; spin_unlock(&dev->dev_reservation_lock); - printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" + pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE" " created new reservation holder TYPE: %s on object RTPI:" " %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(), core_scsi3_pr_dump_type(type), rtpi, dest_pr_reg->pr_res_generation); - printk(KERN_INFO "SPC-3 PR Successfully moved reservation from" + pr_debug("SPC-3 PR Successfully moved reservation from" " %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(), @@ -3696,18 +3696,18 @@ after_iport_check: * Clear the APTPL metadata if APTPL has been disabled, otherwise * write out the updated metadata to struct file for this SCSI device. 
*/ - if (!(aptpl)) { + if (!aptpl) { pr_tmpl->pr_aptpl_active = 0; core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0); - printk("SPC-3 PR: Set APTPL Bit Deactivated for" + pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for" " REGISTER_AND_MOVE\n"); } else { pr_tmpl->pr_aptpl_active = 1; ret = core_scsi3_update_and_write_aptpl(cmd->se_dev, &dest_pr_reg->pr_aptpl_buf[0], pr_tmpl->pr_aptpl_buf_len); - if (!(ret)) - printk("SPC-3 PR: Set APTPL Bit Activated for" + if (!ret) + pr_debug("SPC-3 PR: Set APTPL Bit Activated for" " REGISTER_AND_MOVE\n"); } @@ -3750,11 +3750,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) * FIXME: A NULL struct se_session pointer means this is not coming from * a $FABRIC_MOD's nexus, but from internal passthrough ops. */ - if (!(cmd->se_sess)) + if (!cmd->se_sess) return PYX_TRANSPORT_LU_COMM_FAILURE; if (cmd->data_length < 24) { - printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list" + pr_warn("SPC-PR: Received PR OUT parameter list" " length too small: %u\n", cmd->data_length); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -3800,9 +3800,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) * the sense key set to ILLEGAL REQUEST, and the additional sense * code set to PARAMETER LIST LENGTH ERROR. */ - if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && + if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && (cmd->data_length != 24)) { - printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter" + pr_warn("SPC-PR: Received PR OUT illegal parameter" " list length: %u\n", cmd->data_length); return PYX_TRANSPORT_INVALID_PARAMETER_LIST; } @@ -3836,7 +3836,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb) return core_scsi3_emulate_pro_register_and_move(cmd, res_key, sa_res_key, aptpl, unreg); default: - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service" + pr_err("Unknown PERSISTENT_RESERVE_OUT service" " action: 0x%02x\n", cdb[1] & 0x1f); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -3858,7 +3858,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) u32 add_len = 0, off = 8; if (cmd->data_length < 8) { - printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u" + pr_err("PRIN SA READ_KEYS SCSI Data Length: %u" " too small\n", cmd->data_length); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -3917,7 +3917,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */ if (cmd->data_length < 8) { - printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u" + pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u" " too small\n", cmd->data_length); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -3999,7 +3999,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) u16 add_len = 8; /* Hardcoded to 8.
*/ if (cmd->data_length < 6) { - printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:" + pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:" " %u too small\n", cmd->data_length); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -4060,7 +4060,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) int format_code = 0; if (cmd->data_length < 8) { - printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u" + pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u" " too small\n", cmd->data_length); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -4091,7 +4091,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) se_tpg, se_nacl, pr_reg, &format_code); if ((exp_desc_len + add_len) > cmd->data_length) { - printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran" + pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" " out of buffer: %d\n", cmd->data_length); spin_lock(&pr_tmpl->registration_lock); atomic_dec(&pr_reg->pr_res_holders); @@ -4141,7 +4141,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) * bit is set to one, the contents of the RELATIVE TARGET PORT * IDENTIFIER field are not defined by this standard. */ - if (!(pr_reg->pr_reg_all_tg_pt)) { + if (!pr_reg->pr_reg_all_tg_pt) { struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep; buf[off++] = ((port->sep_rtpi >> 8) & 0xff); @@ -4203,7 +4203,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb) case PRI_READ_FULL_STATUS: return core_scsi3_pri_read_full_status(cmd); default: - printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service" + pr_err("Unknown PERSISTENT_RESERVE_IN service" " action: 0x%02x\n", cdb[1] & 0x1f); return PYX_TRANSPORT_INVALID_CDB_FIELD; } @@ -4224,7 +4224,7 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd) * CONFLICT status. 
*/ if (dev->dev_flags & DF_SPC2_RESERVATIONS) { - printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy" + pr_err("Received PERSISTENT_RESERVE CDB while legacy" " SPC-2 reservation is held, returning" " RESERVATION_CONFLICT\n"); return PYX_TRANSPORT_RESERVATION_CONFLICT; @@ -4263,7 +4263,7 @@ int core_setup_reservations(struct se_device *dev, int force_pt) rest->res_type = SPC_PASSTHROUGH; rest->pr_ops.t10_reservation_check = &core_pt_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder; - printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation" + pr_debug("%s: Using SPC_PASSTHROUGH, no reservation" " emulation\n", dev->transport->name); return 0; } @@ -4275,14 +4275,14 @@ int core_setup_reservations(struct se_device *dev, int force_pt) rest->res_type = SPC3_PERSISTENT_RESERVATIONS; rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder; - printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS" + pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS" " emulation\n", dev->transport->name); } else { rest->res_type = SPC2_RESERVATIONS; rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check; rest->pr_ops.t10_seq_non_holder = &core_scsi2_reservation_seq_non_holder; - printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n", + pr_debug("%s: Using SPC2_RESERVATIONS emulation\n", dev->transport->name); } diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 318ef14fe37d..a2ce5998d318 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -65,8 +65,8 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) struct pscsi_hba_virt *phv; phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL); - if (!(phv)) { - printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n"); + if (!phv) { + pr_err("Unable to allocate struct pscsi_hba_virt\n"); return -ENOMEM; } phv->phv_host_id = host_id; @@ -74,10 +74,10 @@ static int pscsi_attach_hba(struct se_hba *hba, u32 host_id) hba->hba_ptr = phv; - printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on" + pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, PSCSI_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n", + pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n", hba->hba_id); return 0; @@ -91,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba) if (scsi_host) { scsi_host_put(scsi_host); - printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from" + pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from" " Generic Target Core\n", hba->hba_id, (scsi_host->hostt->name) ? (scsi_host->hostt->name) : "Unknown"); } else - printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA" + pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA" " from Generic Target Core\n", hba->hba_id); kfree(phv); @@ -110,14 +110,14 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) /* * Release the struct Scsi_Host */ - if (!(mode_flag)) { - if (!(sh)) + if (!mode_flag) { + if (!sh) return 0; phv->phv_lld_host = NULL; phv->phv_mode = PHV_VIRUTAL_HOST_ID; - printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" + pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough" " %s\n", hba->hba_id, (sh->hostt->name) ? 
(sh->hostt->name) : "Unknown"); @@ -130,7 +130,7 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) */ sh = scsi_host_lookup(phv->phv_host_id); if (IS_ERR(sh)) { - printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for" + pr_err("pSCSI: Unable to locate SCSI Host for" " phv_host_id: %d\n", phv->phv_host_id); return PTR_ERR(sh); } @@ -138,7 +138,7 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag) phv->phv_lld_host = sh; phv->phv_mode = PHV_LLD_SCSI_HOST_NO; - printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", + pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n", hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown"); return 1; @@ -257,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, page_83 = &buf[off]; ident_len = page_83[3]; if (!ident_len) { - printk(KERN_ERR "page_83[3]: identifier" + pr_err("page_83[3]: identifier" " length zero!\n"); break; } - printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len); + pr_debug("T10 VPD Identifer Length: %d\n", ident_len); vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL); if (!vpd) { - printk(KERN_ERR "Unable to allocate memory for" + pr_err("Unable to allocate memory for" " struct t10_vpd\n"); goto out; } @@ -317,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list( if (!sd->queue_depth) { sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH; - printk(KERN_ERR "Set broken SCSI Device %d:%d:%d" + pr_err("Set broken SCSI Device %d:%d:%d" " queue_depth to %d\n", sd->channel, sd->id, sd->lun, sd->queue_depth); } @@ -355,7 +355,7 @@ static struct se_device *pscsi_add_device_to_list( dev = transport_add_device_to_core_hba(hba, &pscsi_template, se_dev, dev_flags, pdv, &dev_limits, NULL, NULL); - if (!(dev)) { + if (!dev) { pdv->pdv_sd = NULL; return NULL; } @@ -385,13 +385,13 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name) struct pscsi_dev_virt *pdv; pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL); - if (!(pdv)) { - printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n"); + if (!pdv) { + pr_err("Unable to allocate memory for struct pscsi_dev_virt\n"); return NULL; } pdv->pdv_se_hba = hba; - printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name); + pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name); return pdv; } @@ -412,7 +412,7 @@ static struct se_device *pscsi_create_type_disk( u32 dev_flags = 0; if (scsi_device_get(sd)) { - printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", + pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", sh->host_no, sd->channel, sd->id, sd->lun); spin_unlock_irq(sh->host_lock); return NULL; @@ -425,19 +425,19 @@ static struct se_device *pscsi_create_type_disk( bd = blkdev_get_by_path(se_dev->se_dev_udev_path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); if (IS_ERR(bd)) { - printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); + pr_err("pSCSI: blkdev_get_by_path() failed\n"); scsi_device_put(sd); return NULL; } pdv->pdv_bd = bd; dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); - if (!(dev)) { + if (!dev) { blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); scsi_device_put(sd); return NULL; } - printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", + pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n", phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); return dev; @@ -459,7 +459,7 @@ static struct se_device *pscsi_create_type_rom( u32 dev_flags = 0; if 
(scsi_device_get(sd)) { - printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n", + pr_err("scsi_device_get() failed for %d:%d:%d:%d\n", sh->host_no, sd->channel, sd->id, sd->lun); spin_unlock_irq(sh->host_lock); return NULL; @@ -467,11 +467,11 @@ static struct se_device *pscsi_create_type_rom( spin_unlock_irq(sh->host_lock); dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); - if (!(dev)) { + if (!dev) { scsi_device_put(sd); return NULL; } - printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", + pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, sd->channel, sd->id, sd->lun); @@ -495,10 +495,10 @@ static struct se_device *pscsi_create_type_other( spin_unlock_irq(sh->host_lock); dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags); - if (!(dev)) + if (!dev) return NULL; - printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", + pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n", phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, sd->channel, sd->id, sd->lun); @@ -517,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice( struct Scsi_Host *sh = phv->phv_lld_host; int legacy_mode_enable = 0; - if (!(pdv)) { - printk(KERN_ERR "Unable to locate struct pscsi_dev_virt" + if (!pdv) { + pr_err("Unable to locate struct pscsi_dev_virt" " parameter\n"); return ERR_PTR(-EINVAL); } @@ -526,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice( * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the * struct Scsi_Host we will need to bring the TCM/pSCSI object online */ - if (!(sh)) { + if (!sh) { if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { - printk(KERN_ERR "pSCSI: Unable to locate struct" + pr_err("pSCSI: Unable to locate struct" " Scsi_Host for PHV_LLD_SCSI_HOST_NO\n"); return ERR_PTR(-ENODEV); } @@ -537,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice( * reference, we enforce that udev_path has been set */ if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) { - printk(KERN_ERR "pSCSI: udev_path attribute has not" + pr_err("pSCSI: udev_path attribute has not" " been set before ENABLE=1\n"); return ERR_PTR(-EINVAL); } @@ -548,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice( */ if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) { spin_lock(&hba->device_lock); - if (!(list_empty(&hba->hba_dev_list))) { - printk(KERN_ERR "pSCSI: Unable to set hba_mode" + if (!list_empty(&hba->hba_dev_list)) { + pr_err("pSCSI: Unable to set hba_mode" " with active devices\n"); spin_unlock(&hba->device_lock); return ERR_PTR(-EEXIST); @@ -565,14 +565,14 @@ static struct se_device *pscsi_create_virtdevice( } else { sh = scsi_host_lookup(pdv->pdv_host_id); if (IS_ERR(sh)) { - printk(KERN_ERR "pSCSI: Unable to locate" + pr_err("pSCSI: Unable to locate" " pdv_host_id: %d\n", pdv->pdv_host_id); return (struct se_device *) sh; } } } else { if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) { - printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while" + pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while" " struct Scsi_Host exists\n"); return ERR_PTR(-EEXIST); } @@ -601,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice( break; } - if (!(dev)) { + if (!dev) { if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) scsi_host_put(sh); else if (legacy_mode_enable) { @@ -615,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice( } spin_unlock_irq(sh->host_lock); - printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, + pr_err("pSCSI: Unable to locate 
%d:%d:%d:%d\n", sh->host_no, pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id); if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) @@ -729,8 +729,8 @@ after_mode_sense: u32 blocksize; buf = sg_virt(&sg[0]); - if (!(buf)) { - printk(KERN_ERR "Unable to get buf for scatterlist\n"); + if (!buf) { + pr_err("Unable to get buf for scatterlist\n"); goto after_mode_select; } @@ -760,34 +760,20 @@ after_mode_select: } static struct se_task * -pscsi_alloc_task(struct se_cmd *cmd) +pscsi_alloc_task(unsigned char *cdb) { struct pscsi_plugin_task *pt; - unsigned char *cdb = cmd->t_task_cdb; - pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL); + /* + * Dynamically alloc cdb space, since it may be larger than + * TCM_MAX_COMMAND_SIZE + */ + pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL); if (!pt) { - printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n"); + pr_err("Unable to allocate struct pscsi_plugin_task\n"); return NULL; } - /* - * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation, - * allocate the extended CDB buffer for per struct se_task context - * pt->pscsi_cdb now. - */ - if (cmd->t_task_cdb != cmd->__t_task_cdb) { - - pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); - if (!(pt->pscsi_cdb)) { - printk(KERN_ERR "pSCSI: Unable to allocate extended" - " pt->pscsi_cdb\n"); - kfree(pt); - return NULL; - } - } else - pt->pscsi_cdb = &pt->__pscsi_cdb[0]; - return &pt->pscsi_task; } @@ -837,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task) pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, (task->task_data_direction == DMA_TO_DEVICE), GFP_KERNEL); - if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) { - printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n", + if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { + pr_err("PSCSI: blk_get_request() failed: %ld\n", IS_ERR(pt->pscsi_req)); return PYX_TRANSPORT_LU_COMM_FAILURE; } @@ -883,14 +869,7 @@ static int pscsi_do_task(struct se_task *task) static void pscsi_free_task(struct se_task *task) { struct pscsi_plugin_task *pt = PSCSI_TASK(task); - struct se_cmd *cmd = task->task_se_cmd; - /* - * Release the extended CDB allocation from pscsi_alloc_task() - * if one exists. - */ - if (cmd->t_task_cdb != cmd->__t_task_cdb) - kfree(pt->pscsi_cdb); /* * We do not release the bio(s) here associated with this task, as * this is handled by bio_put() and pscsi_bi_endio(). 
@@ -936,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, switch (token) { case Opt_scsi_host_id: if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) { - printk(KERN_ERR "PSCSI[%d]: Unable to accept" + pr_err("PSCSI[%d]: Unable to accept" " scsi_host_id while phv_mode ==" " PHV_LLD_SCSI_HOST_NO\n", phv->phv_host_id); @@ -945,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, } match_int(args, &arg); pdv->pdv_host_id = arg; - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:" + pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" " %d\n", phv->phv_host_id, pdv->pdv_host_id); pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; break; case Opt_scsi_channel_id: match_int(args, &arg); pdv->pdv_channel_id = arg; - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel" + pr_debug("PSCSI[%d]: Referencing SCSI Channel" " ID: %d\n", phv->phv_host_id, pdv->pdv_channel_id); pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; @@ -960,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, case Opt_scsi_target_id: match_int(args, &arg); pdv->pdv_target_id = arg; - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target" + pr_debug("PSCSI[%d]: Referencing SCSI Target" " ID: %d\n", phv->phv_host_id, pdv->pdv_target_id); pdv->pdv_flags |= PDF_HAS_TARGET_ID; @@ -968,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba, case Opt_scsi_lun_id: match_int(args, &arg); pdv->pdv_lun_id = arg; - printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:" + pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" " %d\n", phv->phv_host_id, pdv->pdv_lun_id); pdv->pdv_flags |= PDF_HAS_LUN_ID; break; @@ -991,7 +970,7 @@ static ssize_t pscsi_check_configfs_dev_params( if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) || !(pdv->pdv_flags & PDF_HAS_TARGET_ID) || !(pdv->pdv_flags & PDF_HAS_LUN_ID)) { - printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and" + pr_err("Missing scsi_channel_id=, scsi_target_id= and" " scsi_lun_id= parameters\n"); return -EINVAL; } @@ -1061,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(int sg_num) * in block/blk-core.c:blk_make_request() */ bio = bio_kmalloc(GFP_KERNEL, sg_num); - if (!(bio)) { - printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n"); + if (!bio) { + pr_err("PSCSI: bio_kmalloc() failed\n"); return NULL; } bio->bi_end_io = pscsi_bi_endio; @@ -1070,12 +1049,6 @@ static inline struct bio *pscsi_get_bio(int sg_num) return bio; } -#if 0 -#define DEBUG_PSCSI(x...) printk(x) -#else -#define DEBUG_PSCSI(x...) -#endif - static int __pscsi_map_task_SG( struct se_task *task, struct scatterlist *task_sg, @@ -1106,34 +1079,34 @@ static int __pscsi_map_task_SG( * is ported to upstream SCSI passthrough functionality that accepts * struct scatterlist->page_link or struct page as a parameter.
*/ - DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages); + pr_debug("PSCSI: nr_pages: %d\n", nr_pages); for_each_sg(task_sg, sg, task_sg_num, i) { page = sg_page(sg); off = sg->offset; len = sg->length; - DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i, + pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, page, len, off); while (len > 0 && data_len > 0) { bytes = min_t(unsigned int, len, PAGE_SIZE - off); bytes = min(bytes, data_len); - if (!(bio)) { + if (!bio) { nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages); nr_pages -= nr_vecs; /* * Calls bio_kmalloc() and sets bio->bi_end_io() */ bio = pscsi_get_bio(nr_vecs); - if (!(bio)) + if (!bio) goto fail; if (rw) bio->bi_rw |= REQ_WRITE; - DEBUG_PSCSI("PSCSI: Allocated bio: %p," + pr_debug("PSCSI: Allocated bio: %p," " dir: %s nr_vecs: %d\n", bio, (rw) ? "rw" : "r", nr_vecs); /* @@ -1148,7 +1121,7 @@ static int __pscsi_map_task_SG( tbio = tbio->bi_next = bio; } - DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d" + pr_debug("PSCSI: Calling bio_add_pc_page() i: %d" " bio: %p page: %p len: %d off: %d\n", i, bio, page, len, off); @@ -1157,11 +1130,11 @@ static int __pscsi_map_task_SG( if (rc != bytes) goto fail; - DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", + pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n", bio->bi_vcnt, nr_vecs); if (bio->bi_vcnt > nr_vecs) { - DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:" + pr_debug("PSCSI: Reached bio->bi_vcnt max:" " %d i: %d bio: %p, allocating another" " bio\n", bio->bi_vcnt, i, bio); /* @@ -1183,15 +1156,15 @@ static int __pscsi_map_task_SG( * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND * primary SCSI WRITE payload mapped for struct se_task->task_sg[] */ - if (!(bidi_read)) { + if (!bidi_read) { /* * Starting with v2.6.31, call blk_make_request() passing in *hbio to * allocate the pSCSI task a struct request. */ pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); - if (!(pt->pscsi_req)) { - printk(KERN_ERR "pSCSI: blk_make_request() failed\n"); + if (!pt->pscsi_req) { + pr_err("pSCSI: blk_make_request() failed\n"); goto fail; } /* @@ -1200,7 +1173,7 @@ static int __pscsi_map_task_SG( */ pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); - return task->task_sg_num; + return task->task_sg_nents; } /* * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND @@ -1208,13 +1181,13 @@ static int __pscsi_map_task_SG( */ pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, hbio, GFP_KERNEL); - if (!(pt->pscsi_req->next_rq)) { - printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n"); + if (!pt->pscsi_req->next_rq) { + pr_err("pSCSI: blk_make_request() failed for BIDI\n"); goto fail; } pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1); - return task->task_sg_num; + return task->task_sg_nents; fail: while (hbio) { bio = hbio; @@ -1233,14 +1206,14 @@ static int pscsi_map_task_SG(struct se_task *task) * Setup the main struct request for the task->task_sg[] payload */ - ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0); + ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_nents, 0); if (ret >= 0 && task->task_sg_bidi) { /* * If present, set up the extra BIDI-COMMAND SCSI READ * struct request and payload.
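Stripped of the debug output, the __pscsi_map_task_SG() hunks above reduce to one pattern: walk the scatterlist, pack pages into bios capped at BIO_MAX_PAGES, chain full bios through bi_next, and hand the whole chain to blk_make_request(). The condensed sketch below assumes each segment fits a single bio_vec (the real loop also splits segments at page boundaries) and uses a hypothetical example_map_sg() helper:

	#include <linux/bio.h>
	#include <linux/blkdev.h>
	#include <linux/kernel.h>
	#include <linux/scatterlist.h>

	static struct request *example_map_sg(struct request_queue *q,
					      struct scatterlist *sgl, int nents,
					      bool write)
	{
		struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, nents, i) {
			if (!bio) {
				bio = bio_kmalloc(GFP_KERNEL,
						  min_t(int, nents - i, BIO_MAX_PAGES));
				if (!bio)
					goto fail;
				if (write)
					bio->bi_rw |= REQ_WRITE;
				if (!hbio)
					hbio = tbio = bio;
				else
					tbio = tbio->bi_next = bio;	/* grow the chain */
			}
			if (bio_add_pc_page(q, bio, sg_page(sg), sg->length,
					    sg->offset) != sg->length)
				goto fail;
			if (bio->bi_vcnt == bio->bi_max_vecs)
				bio = NULL;	/* full: open a fresh bio next pass */
		}
		/* blk_make_request() walks and consumes the bi_next chain */
		return blk_make_request(q, hbio, GFP_KERNEL);
	fail:
		while (hbio) {
			bio = hbio;
			hbio = hbio->bi_next;
			bio->bi_next = NULL;
			bio_endio(bio, 0);	/* mirrors the fail: path above */
		}
		return NULL;
	}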
*/ ret = __pscsi_map_task_SG(task, task->task_sg_bidi, - task->task_sg_num, 1); + task->task_sg_nents, 1); } if (ret < 0) @@ -1319,9 +1292,9 @@ static inline void pscsi_process_SAM_status( struct pscsi_plugin_task *pt) { task->task_scsi_status = status_byte(pt->pscsi_result); - if ((task->task_scsi_status)) { + if (task->task_scsi_status) { task->task_scsi_status <<= 1; - printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:" + pr_debug("PSCSI Status Byte exception at task: %p CDB:" " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], pt->pscsi_result); } @@ -1331,7 +1304,7 @@ static inline void pscsi_process_SAM_status( transport_complete_task(task, (!task->task_scsi_status)); break; default: - printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:" + pr_debug("PSCSI Host Byte exception at task: %p CDB:" " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0], pt->pscsi_result); task->task_scsi_status = SAM_STAT_CHECK_CONDITION; diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 280b689379c3..ebf4f1ae2c83 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -23,13 +23,12 @@ struct pscsi_plugin_task { struct se_task pscsi_task; - unsigned char *pscsi_cdb; - unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE]; unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE]; int pscsi_direction; int pscsi_result; u32 pscsi_resid; struct request *pscsi_req; + unsigned char pscsi_cdb[0]; } ____cacheline_aligned; #define PDF_HAS_CHANNEL_ID 0x01 diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 4f9416d5c028..3dd81d24d9a9 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -44,12 +44,8 @@ #include "target_core_rd.h" -static struct se_subsystem_api rd_dr_template; static struct se_subsystem_api rd_mcp_template; -/* #define DEBUG_RAMDISK_MCP */ -/* #define DEBUG_RAMDISK_DR */ - /* rd_attach_hba(): (Part of se_subsystem_api_t template) * * @@ -59,8 +55,8 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) struct rd_host *rd_host; rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL); - if (!(rd_host)) { - printk(KERN_ERR "Unable to allocate memory for struct rd_host\n"); + if (!rd_host) { + pr_err("Unable to allocate memory for struct rd_host\n"); return -ENOMEM; } @@ -68,10 +64,10 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id) hba->hba_ptr = rd_host; - printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" + pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on" " Generic Target Core Stack %s\n", hba->hba_id, RD_HBA_VERSION, TARGET_CORE_MOD_VERSION); - printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" + pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic" " MaxSectors: %u\n", hba->hba_id, rd_host->rd_host_id, RD_MAX_SECTORS); @@ -82,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba) { struct rd_host *rd_host = hba->hba_ptr; - printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from" + pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from" " Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); kfree(rd_host); @@ -111,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev) for (j = 0; j < sg_per_table; j++) { pg = sg_page(&sg[j]); - if ((pg)) { + if (pg) { __free_page(pg); page_count++; } @@ -120,7 +116,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev) kfree(sg); } - printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk" + pr_debug("CORE_RD[%u] - 
Released device space for Ramdisk" " Device ID: %u, pages %u in %u tables total bytes %lu\n", rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count, rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE); @@ -145,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) struct scatterlist *sg; if (rd_dev->rd_page_count <= 0) { - printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n", + pr_err("Illegal page count: %u for Ramdisk device\n", rd_dev->rd_page_count); return -EINVAL; } @@ -154,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev) sg_tables = (total_sg_needed / max_sg_per_table) + 1; sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL); - if (!(sg_table)) { - printk(KERN_ERR "Unable to allocate memory for Ramdisk" + if (!sg_table) { + pr_err("Unable to allocate memory for Ramdisk" " scatterlist tables\n"); return -ENOMEM; } @@ -169,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev) sg = kzalloc(sg_per_table * sizeof(struct scatterlist), GFP_KERNEL); - if (!(sg)) { - printk(KERN_ERR "Unable to allocate scatterlist array" + if (!sg) { + pr_err("Unable to allocate scatterlist array" " for struct rd_dev\n"); return -ENOMEM; } - sg_init_table((struct scatterlist *)&sg[0], sg_per_table); + sg_init_table(sg, sg_per_table); sg_table[i].sg_table = sg; sg_table[i].rd_sg_count = sg_per_table; @@ -185,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev) for (j = 0; j < sg_per_table; j++) { pg = alloc_pages(GFP_KERNEL, 0); - if (!(pg)) { - printk(KERN_ERR "Unable to allocate scatterlist" + if (!pg) { + pr_err("Unable to allocate scatterlist" " pages for struct rd_dev_sg_table\n"); return -ENOMEM; } @@ -198,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev) total_sg_needed -= sg_per_table; } - printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of" + pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of" " %u pages in %u tables\n", rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count, rd_dev->sg_table_count); @@ -215,8 +211,8 @@ static void *rd_allocate_virtdevice( struct rd_host *rd_host = hba->hba_ptr; rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL); - if (!(rd_dev)) { - printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n"); + if (!rd_dev) { + pr_err("Unable to allocate memory for struct rd_dev\n"); return NULL; } @@ -226,11 +222,6 @@ static void *rd_allocate_virtdevice( return rd_dev; } -static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name) -{ - return rd_allocate_virtdevice(hba, name, 1); -} - static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name) { return rd_allocate_virtdevice(hba, name, 0); @@ -270,16 +261,15 @@ static struct se_device *rd_create_virtdevice( dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH; dev = transport_add_device_to_core_hba(hba, - (rd_dev->rd_direct) ? &rd_dr_template : &rd_mcp_template, se_dev, dev_flags, rd_dev, &dev_limits, prod, rev); - if (!(dev)) + if (!dev) goto fail; rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; rd_dev->rd_queue_depth = dev->queue_depth; - printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" + pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of" " %u pages in %u tables, %lu total bytes\n", rd_host->rd_host_id, (!rd_dev->rd_direct) ? 
"MEMCPY" : "DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count, @@ -293,14 +283,6 @@ fail: return ERR_PTR(ret); } -static struct se_device *rd_DIRECT_create_virtdevice( - struct se_hba *hba, - struct se_subsystem_dev *se_dev, - void *p) -{ - return rd_create_virtdevice(hba, se_dev, p, 1); -} - static struct se_device *rd_MEMCPY_create_virtdevice( struct se_hba *hba, struct se_subsystem_dev *se_dev, @@ -327,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task) } static struct se_task * -rd_alloc_task(struct se_cmd *cmd) +rd_alloc_task(unsigned char *cdb) { struct rd_request *rd_req; rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL); if (!rd_req) { - printk(KERN_ERR "Unable to allocate struct rd_request\n"); + pr_err("Unable to allocate struct rd_request\n"); return NULL; } - rd_req->rd_dev = cmd->se_dev->dev_ptr; return &rd_req->rd_task; } @@ -357,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) return sg_table; } - printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n", + pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n", page); return NULL; @@ -370,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) static int rd_MEMCPY_read(struct rd_request *req) { struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_dev; + struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *sg_d, *sg_s; void *dst, *src; @@ -379,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req) u32 rd_offset = req->rd_offset; table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) + if (!table) return -EINVAL; table_sg_end = (table->page_end_offset - req->rd_page); sg_d = task->task_sg; sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" + + pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:" " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, req->rd_page, req->rd_offset); -#endif + src_offset = rd_offset; while (req->rd_size) { if ((sg_d[i].length - dst_offset) < (sg_s[j].length - src_offset)) { length = (sg_d[i].length - dst_offset); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d" + + pr_debug("Step 1 - sg_d[%d]: %p length: %d" " offset: %u sg_s[%d].length: %u\n", i, &sg_d[i], sg_d[i].length, sg_d[i].offset, j, sg_s[j].length); - printk(KERN_INFO "Step 1 - length: %u dst_offset: %u" + pr_debug("Step 1 - length: %u dst_offset: %u" " src_offset: %u\n", length, dst_offset, src_offset); -#endif + if (length > req->rd_size) length = req->rd_size; @@ -421,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req) page_end = 0; } else { length = (sg_s[j].length - src_offset); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d" + + pr_debug("Step 2 - sg_d[%d]: %p length: %d" " offset: %u sg_s[%d].length: %u\n", i, &sg_d[i], sg_d[i].length, sg_d[i].offset, j, sg_s[j].length); - printk(KERN_INFO "Step 2 - length: %u dst_offset: %u" + pr_debug("Step 2 - length: %u dst_offset: %u" " src_offset: %u\n", length, dst_offset, src_offset); -#endif + if (length > req->rd_size) length = req->rd_size; @@ -453,31 +434,28 @@ static int rd_MEMCPY_read(struct rd_request *req) memcpy(dst, src, length); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "page: %u, remaining size: %u, length: %u," + pr_debug("page: %u, remaining size: %u, length: %u," " i: 
%u, j: %u\n", req->rd_page, (req->rd_size - length), length, i, j); -#endif + req->rd_size -= length; - if (!(req->rd_size)) + if (!req->rd_size) return 0; if (!page_end) continue; if (++req->rd_page <= table->page_end_offset) { -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "page: %u in same page table\n", + pr_debug("page: %u in same page table\n", req->rd_page); -#endif continue; } -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "getting new page table for page: %u\n", + + pr_debug("getting new page table for page: %u\n", req->rd_page); -#endif + table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) + if (!table) return -EINVAL; sg_s = &table->sg_table[j = 0]; @@ -493,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req) static int rd_MEMCPY_write(struct rd_request *req) { struct se_task *task = &req->rd_task; - struct rd_dev *dev = req->rd_dev; + struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; struct rd_dev_sg_table *table; struct scatterlist *sg_d, *sg_s; void *dst, *src; @@ -502,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req) u32 rd_offset = req->rd_offset; table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) + if (!table) return -EINVAL; table_sg_end = (table->page_end_offset - req->rd_page); sg_d = &table->sg_table[req->rd_page - table->page_start_offset]; sg_s = task->task_sg; -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u," + + pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u," " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size, req->rd_page, req->rd_offset); -#endif + dst_offset = rd_offset; while (req->rd_size) { if ((sg_s[i].length - src_offset) < (sg_d[j].length - dst_offset)) { length = (sg_s[i].length - src_offset); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d" + + pr_debug("Step 1 - sg_s[%d]: %p length: %d" " offset: %d sg_d[%d].length: %u\n", i, &sg_s[i], sg_s[i].length, sg_s[i].offset, j, sg_d[j].length); - printk(KERN_INFO "Step 1 - length: %u src_offset: %u" + pr_debug("Step 1 - length: %u src_offset: %u" " dst_offset: %u\n", length, src_offset, dst_offset); -#endif + if (length > req->rd_size) length = req->rd_size; @@ -544,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req) page_end = 0; } else { length = (sg_d[j].length - dst_offset); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d" + + pr_debug("Step 2 - sg_s[%d]: %p length: %d" " offset: %d sg_d[%d].length: %u\n", i, &sg_s[i], sg_s[i].length, sg_s[i].offset, j, sg_d[j].length); - printk(KERN_INFO "Step 2 - length: %u src_offset: %u" + pr_debug("Step 2 - length: %u src_offset: %u" " dst_offset: %u\n", length, src_offset, dst_offset); -#endif + if (length > req->rd_size) length = req->rd_size; @@ -576,31 +554,28 @@ static int rd_MEMCPY_write(struct rd_request *req) memcpy(dst, src, length); -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "page: %u, remaining size: %u, length: %u," + pr_debug("page: %u, remaining size: %u, length: %u," " i: %u, j: %u\n", req->rd_page, (req->rd_size - length), length, i, j); -#endif + req->rd_size -= length; - if (!(req->rd_size)) + if (!req->rd_size) return 0; if (!page_end) continue; if (++req->rd_page <= table->page_end_offset) { -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "page: %u in same page table\n", + pr_debug("page: %u in same page table\n", req->rd_page); -#endif continue; } -#ifdef DEBUG_RAMDISK_MCP - printk(KERN_INFO "getting new page table for page: %u\n", + + pr_debug("getting new page table for page: 
%u\n", req->rd_page); -#endif + table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) + if (!table) return -EINVAL; sg_d = &table->sg_table[j = 0]; @@ -641,273 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task) return PYX_TRANSPORT_SENT_TO_TRANSPORT; } -/* rd_DIRECT_with_offset(): - * - * - */ -static int rd_DIRECT_with_offset( - struct se_task *task, - struct list_head *se_mem_list, - u32 *se_mem_cnt, - u32 *task_offset) -{ - struct rd_request *req = RD_REQ(task); - struct rd_dev *dev = req->rd_dev; - struct rd_dev_sg_table *table; - struct se_mem *se_mem; - struct scatterlist *sg_s; - u32 j = 0, set_offset = 1; - u32 get_next_table = 0, offset_length, table_sg_end; - - table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -EINVAL; - - table_sg_end = (table->page_end_offset - req->rd_page); - sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n", - (task->task_data_direction == DMA_TO_DEVICE) ? - "Write" : "Read", - task->task_lba, req->rd_size, req->rd_page, req->rd_offset); -#endif - while (req->rd_size) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&se_mem->se_list); - - if (set_offset) { - offset_length = sg_s[j].length - req->rd_offset; - if (offset_length > req->rd_size) - offset_length = req->rd_size; - - se_mem->se_page = sg_page(&sg_s[j++]); - se_mem->se_off = req->rd_offset; - se_mem->se_len = offset_length; - - set_offset = 0; - get_next_table = (j > table_sg_end); - goto check_eot; - } - - offset_length = (req->rd_size < req->rd_offset) ? - req->rd_size : req->rd_offset; - - se_mem->se_page = sg_page(&sg_s[j]); - se_mem->se_len = offset_length; - - set_offset = 1; - -check_eot: -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u" - " se_mem: %p, se_page: %p se_off: %u se_len: %u\n", - req->rd_page, req->rd_size, offset_length, j, se_mem, - se_mem->se_page, se_mem->se_off, se_mem->se_len); -#endif - list_add_tail(&se_mem->se_list, se_mem_list); - (*se_mem_cnt)++; - - req->rd_size -= offset_length; - if (!(req->rd_size)) - goto out; - - if (!set_offset && !get_next_table) - continue; - - if (++req->rd_page <= table->page_end_offset) { -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "page: %u in same page table\n", - req->rd_page); -#endif - continue; - } -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "getting new page table for page: %u\n", - req->rd_page); -#endif - table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -EINVAL; - - sg_s = &table->sg_table[j = 0]; - } - -out: - task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", - *se_mem_cnt); -#endif - return 0; -} - -/* rd_DIRECT_without_offset(): - * - * - */ -static int rd_DIRECT_without_offset( - struct se_task *task, - struct list_head *se_mem_list, - u32 *se_mem_cnt, - u32 *task_offset) -{ - struct rd_request *req = RD_REQ(task); - struct rd_dev *dev = req->rd_dev; - struct rd_dev_sg_table *table; - struct se_mem *se_mem; - struct scatterlist *sg_s; - u32 length, j = 0; - - table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -EINVAL; - - sg_s = &table->sg_table[req->rd_page - table->page_start_offset]; -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n", - 
(task->task_data_direction == DMA_TO_DEVICE) ? - "Write" : "Read", - task->task_lba, req->rd_size, req->rd_page); -#endif - while (req->rd_size) { - se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL); - if (!(se_mem)) { - printk(KERN_ERR "Unable to allocate struct se_mem\n"); - return -ENOMEM; - } - INIT_LIST_HEAD(&se_mem->se_list); - - length = (req->rd_size < sg_s[j].length) ? - req->rd_size : sg_s[j].length; - - se_mem->se_page = sg_page(&sg_s[j++]); - se_mem->se_len = length; - -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p," - " se_page: %p se_off: %u se_len: %u\n", req->rd_page, - req->rd_size, j, se_mem, se_mem->se_page, - se_mem->se_off, se_mem->se_len); -#endif - list_add_tail(&se_mem->se_list, se_mem_list); - (*se_mem_cnt)++; - - req->rd_size -= length; - if (!(req->rd_size)) - goto out; - - if (++req->rd_page <= table->page_end_offset) { -#ifdef DEBUG_RAMDISK_DR - printk("page: %u in same page table\n", - req->rd_page); -#endif - continue; - } -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "getting new page table for page: %u\n", - req->rd_page); -#endif - table = rd_get_sg_table(dev, req->rd_page); - if (!(table)) - return -EINVAL; - - sg_s = &table->sg_table[j = 0]; - } - -out: - task->task_se_cmd->t_tasks_se_num += *se_mem_cnt; -#ifdef DEBUG_RAMDISK_DR - printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n", - *se_mem_cnt); -#endif - return 0; -} - -/* rd_DIRECT_do_se_mem_map(): - * - * - */ -static int rd_DIRECT_do_se_mem_map( - struct se_task *task, - struct list_head *se_mem_list, - void *in_mem, - struct se_mem *in_se_mem, - struct se_mem **out_se_mem, - u32 *se_mem_cnt, - u32 *task_offset_in) -{ - struct se_cmd *cmd = task->task_se_cmd; - struct rd_request *req = RD_REQ(task); - u32 task_offset = *task_offset_in; - unsigned long long lba; - int ret; - int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size; - - lba = task->task_lba; - req->rd_page = ((task->task_lba * block_size) / PAGE_SIZE); - req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size; - req->rd_size = task->task_size; - - if (req->rd_offset) - ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt, - task_offset_in); - else - ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt, - task_offset_in); - - if (ret < 0) - return ret; - - if (cmd->se_tfo->task_sg_chaining == 0) - return 0; - /* - * Currently prevent writers from multiple HW fabrics doing - * pci_map_sg() to RD_DR's internal scatterlist memory. - */ - if (cmd->data_direction == DMA_TO_DEVICE) { - printk(KERN_ERR "DMA_TO_DEVICE not supported for" - " RAMDISK_DR with task_sg_chaining=1\n"); - return -ENOSYS; - } - /* - * Special case for if task_sg_chaining is enabled, then - * we setup struct se_task->task_sg[], as it will be used by - * transport_do_task_sg_chain() for creating chainged SGLs - * across multiple struct se_task->task_sg[]. - */ - ret = transport_init_task_sg(task, - list_first_entry(&cmd->t_mem_list, - struct se_mem, se_list), - task_offset); - if (ret <= 0) - return ret; - - return transport_map_mem_to_sg(task, se_mem_list, task->task_sg, - list_first_entry(&cmd->t_mem_list, - struct se_mem, se_list), - out_se_mem, se_mem_cnt, task_offset_in); -} - -/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template) - * - * - */ -static int rd_DIRECT_do_task(struct se_task *task) -{ - /* - * At this point the locally allocated RD tables have been mapped - * to struct se_mem elements in rd_DIRECT_do_se_mem_map(). 
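With the rd_dr (DIRECT) path deleted here, only the rd_mcp backend remains, and the rd_MEMCPY_read()/rd_MEMCPY_write() loops above still begin with the same step the removed code used: translating a backing-store page index into the scatterlist table that owns it, via rd_get_sg_table(). The shape of that lookup is roughly the following (the sg_table_array field name is assumed from target_core_rd.h; rd_get_sg_table() itself is unchanged by this patch):

	static struct rd_dev_sg_table *example_get_sg_table(struct rd_dev *rd_dev,
							    u32 page)
	{
		u32 i;

		for (i = 0; i < rd_dev->sg_table_count; i++) {
			struct rd_dev_sg_table *t = &rd_dev->sg_table_array[i];

			/* Each table covers pages [page_start_offset, page_end_offset] */
			if (page >= t->page_start_offset &&
			    page <= t->page_end_offset)
				return t;	/* entry: t->sg_table[page - t->page_start_offset] */
		}
		return NULL;
	}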
- */ - task->task_scsi_status = GOOD; - transport_complete_task(task, 1); - - return PYX_TRANSPORT_SENT_TO_TRANSPORT; -} - /* rd_free_task(): (Part of se_subsystem_api_t template) * * @@ -952,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params( case Opt_rd_pages: match_int(args, &arg); rd_dev->rd_page_count = arg; - printk(KERN_INFO "RAMDISK: Referencing Page" + pr_debug("RAMDISK: Referencing Page" " Count: %u\n", rd_dev->rd_page_count); rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; break; @@ -970,7 +678,7 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys struct rd_dev *rd_dev = se_dev->se_dev_su_ptr; if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) { - printk(KERN_INFO "Missing rd_pages= parameter\n"); + pr_debug("Missing rd_pages= parameter\n"); return -EINVAL; } @@ -1022,27 +730,6 @@ static sector_t rd_get_blocks(struct se_device *dev) return blocks_long; } -static struct se_subsystem_api rd_dr_template = { - .name = "rd_dr", - .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, - .attach_hba = rd_attach_hba, - .detach_hba = rd_detach_hba, - .allocate_virtdevice = rd_DIRECT_allocate_virtdevice, - .create_virtdevice = rd_DIRECT_create_virtdevice, - .free_device = rd_free_device, - .alloc_task = rd_alloc_task, - .do_task = rd_DIRECT_do_task, - .free_task = rd_free_task, - .check_configfs_dev_params = rd_check_configfs_dev_params, - .set_configfs_dev_params = rd_set_configfs_dev_params, - .show_configfs_dev_params = rd_show_configfs_dev_params, - .get_cdb = rd_get_cdb, - .get_device_rev = rd_get_device_rev, - .get_device_type = rd_get_device_type, - .get_blocks = rd_get_blocks, - .do_se_mem_map = rd_DIRECT_do_se_mem_map, -}; - static struct se_subsystem_api rd_mcp_template = { .name = "rd_mcp", .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV, @@ -1067,13 +754,8 @@ int __init rd_module_init(void) { int ret; - ret = transport_subsystem_register(&rd_dr_template); - if (ret < 0) - return ret; - ret = transport_subsystem_register(&rd_mcp_template); if (ret < 0) { - transport_subsystem_release(&rd_dr_template); return ret; } @@ -1082,6 +764,5 @@ int __init rd_module_init(void) void rd_module_exit(void) { - transport_subsystem_release(&rd_dr_template); transport_subsystem_release(&rd_mcp_template); } diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index bab93020a3a9..0d027732cd00 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ -32,8 +32,6 @@ struct rd_request { u32 rd_page_count; /* Scatterlist count */ u32 rd_size; - /* Ramdisk device */ - struct rd_dev *rd_dev; } ____cacheline_aligned; struct rd_dev_sg_table { diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index f1feea3b2319..27d4925e51c3 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -41,13 +41,6 @@ #include "target_core_alua.h" #include "target_core_pr.h" -#define DEBUG_LUN_RESET -#ifdef DEBUG_LUN_RESET -#define DEBUG_LR(x...) printk(KERN_INFO x) -#else -#define DEBUG_LR(x...) -#endif - struct se_tmr_req *core_tmr_alloc_req( struct se_cmd *se_cmd, void *fabric_tmr_ptr, @@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req( tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? 
GFP_ATOMIC : GFP_KERNEL); - if (!(tmr)) { - printk(KERN_ERR "Unable to allocate struct se_tmr_req\n"); + if (!tmr) { + pr_err("Unable to allocate struct se_tmr_req\n"); return ERR_PTR(-ENOMEM); } tmr->task_cmd = se_cmd; @@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort( int tas, int fe_count) { - if (!(fe_count)) { + if (!fe_count) { transport_cmd_finish_abort(cmd, 1); return; } /* * TASK ABORTED status (TAS) bit support */ - if (((tmr_nacl != NULL) && + if ((tmr_nacl && (tmr_nacl == cmd->se_sess->se_node_acl)) || tas) transport_send_task_abort(cmd); @@ -141,13 +134,13 @@ int core_tmr_lun_reset( tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; tmr_tpg = tmr->task_cmd->se_sess->se_tpg; if (tmr_nacl && tmr_tpg) { - DEBUG_LR("LUN_RESET: TMR caller fabric: %s" + pr_debug("LUN_RESET: TMR caller fabric: %s" " initiator port %s\n", tmr_tpg->se_tpg_tfo->get_fabric_name(), tmr_nacl->initiatorname); } } - DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n", + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", (preempt_and_abort_list) ? "Preempt" : "TMR", dev->transport->name, tas); /* @@ -163,8 +156,8 @@ int core_tmr_lun_reset( continue; cmd = tmr_p->task_cmd; - if (!(cmd)) { - printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n"); + if (!cmd) { + pr_err("Unable to locate struct se_cmd for TMR\n"); continue; } /* @@ -172,14 +165,14 @@ int core_tmr_lun_reset( * parameter (eg: for PROUT PREEMPT_AND_ABORT service action * skip non-registration key matching TMRs. */ - if ((preempt_and_abort_list != NULL) && + if (preempt_and_abort_list && (core_scsi3_check_cdb_abort_and_preempt( preempt_and_abort_list, cmd) != 0)) continue; spin_unlock_irq(&dev->se_tmr_lock); spin_lock_irqsave(&cmd->t_state_lock, flags); - if (!(atomic_read(&cmd->t_transport_active))) { + if (!atomic_read(&cmd->t_transport_active)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); spin_lock_irq(&dev->se_tmr_lock); continue; } @@ -189,7 +182,7 @@ int core_tmr_lun_reset( spin_lock_irq(&dev->se_tmr_lock); continue; } - DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x," + pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x," " Response: 0x%02x, t_state: %d\n", (preempt_and_abort_list) ? "Preempt" : "", tmr_p, tmr_p->function, tmr_p->response, cmd->t_state); @@ -224,7 +217,7 @@ int core_tmr_lun_reset( list_for_each_entry_safe(task, task_tmp, &dev->state_task_list, t_state_list) { if (!task->task_se_cmd) { - printk(KERN_ERR "task->task_se_cmd is NULL!\n"); + pr_err("task->task_se_cmd is NULL!\n"); continue; } cmd = task->task_se_cmd; @@ -233,7 +226,7 @@ int core_tmr_lun_reset( * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. */ - if ((preempt_and_abort_list != NULL) && + if (preempt_and_abort_list && (core_scsi3_check_cdb_abort_and_preempt( preempt_and_abort_list, cmd) != 0)) continue; @@ -248,14 +241,14 @@ int core_tmr_lun_reset( spin_unlock_irqrestore(&dev->execute_task_lock, flags); spin_lock_irqsave(&cmd->t_state_lock, flags); - DEBUG_LR("LUN_RESET: %s cmd: %p task: %p" + pr_debug("LUN_RESET: %s cmd: %p task: %p" " ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" "def_t_state: %d/%d cdb: 0x%02x\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, task, cmd->se_tfo->get_task_tag(cmd), 0, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state, cmd->t_task_cdb[0]); - DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" + pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx" " t_task_cdbs: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", @@ -272,10 +265,10 @@ int core_tmr_lun_reset( spin_unlock_irqrestore( &cmd->t_state_lock, flags); - DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown" + pr_debug("LUN_RESET: Waiting for task: %p to shutdown" " for dev: %p\n", task, dev); wait_for_completion(&task->task_stop_comp); - DEBUG_LR("LUN_RESET Completed task: %p shutdown for" + pr_debug("LUN_RESET Completed task: %p shutdown for" " dev: %p\n", task, dev); spin_lock_irqsave(&cmd->t_state_lock, flags); atomic_dec(&cmd->t_task_cdbs_left); @@ -288,10 +281,10 @@ int core_tmr_lun_reset( } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { + if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { spin_unlock_irqrestore( &cmd->t_state_lock, flags); - DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for" + pr_debug("LUN_RESET: Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, atomic_read(&cmd->t_task_cdbs_ex_left)); @@ -301,7 +294,7 @@ int core_tmr_lun_reset( fe_count = atomic_read(&cmd->t_fe_count); if (atomic_read(&cmd->t_transport_active)) { - DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" + pr_debug("LUN_RESET: got t_transport_active = 1 for" " task: %p, t_fe_count: %d dev: %p\n", task, fe_count, dev); atomic_set(&cmd->t_transport_aborted, 1); @@ -312,7 +305,7 @@ int core_tmr_lun_reset( spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," + pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p," " t_fe_count: %d dev: %p\n", task, fe_count, dev); atomic_set(&cmd->t_transport_aborted, 1); spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -335,7 +328,7 @@ int core_tmr_lun_reset( * For PREEMPT_AND_ABORT usage, only process commands * with a matching reservation key. */ - if ((preempt_and_abort_list != NULL) && + if (preempt_and_abort_list && (core_scsi3_check_cdb_abort_and_preempt( preempt_and_abort_list, cmd) != 0)) continue; @@ -350,7 +343,7 @@ int core_tmr_lun_reset( list_del(&cmd->se_queue_node); spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:" + pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:" " %d t_fe_count: %d\n", (preempt_and_abort_list) ? "Preempt" : "", cmd, cmd->t_state, atomic_read(&cmd->t_fe_count)); @@ -368,20 +361,20 @@ int core_tmr_lun_reset( * Clear any legacy SPC-2 reservation when called during * LOGICAL UNIT RESET */ - if (!(preempt_and_abort_list) && + if (!preempt_and_abort_list && (dev->dev_flags & DF_SPC2_RESERVATIONS)) { spin_lock(&dev->dev_reservation_lock); dev->dev_reserved_node_acl = NULL; dev->dev_flags &= ~DF_SPC2_RESERVATIONS; spin_unlock(&dev->dev_reservation_lock); - printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n"); + pr_debug("LUN_RESET: SCSI-2 Released reservation\n"); } spin_lock_irq(&dev->stats_lock); dev->num_resets++; spin_unlock_irq(&dev->stats_lock); - DEBUG_LR("LUN_RESET: %s for [%s] Complete\n", + pr_debug("LUN_RESET: %s for [%s] Complete\n", (preempt_and_abort_list) ? 
"Preempt" : "TMR", dev->transport->name); return 0; diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 448129f74cf9..4f1ba4c5ef11 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -72,7 +72,7 @@ static void core_clear_initiator_node_from_tpg( continue; if (!deve->se_lun) { - printk(KERN_ERR "%s device entries device pointer is" + pr_err("%s device entries device pointer is" " NULL, but Initiator has access.\n", tpg->se_tpg_tfo->get_fabric_name()); continue; @@ -86,14 +86,13 @@ static void core_clear_initiator_node_from_tpg( spin_lock(&lun->lun_acl_lock); list_for_each_entry_safe(acl, acl_tmp, &lun->lun_acl_list, lacl_list) { - if (!(strcmp(acl->initiatorname, - nacl->initiatorname)) && - (acl->mapped_lun == deve->mapped_lun)) + if (!strcmp(acl->initiatorname, nacl->initiatorname) && + (acl->mapped_lun == deve->mapped_lun)) break; } if (!acl) { - printk(KERN_ERR "Unable to locate struct se_lun_acl for %s," + pr_err("Unable to locate struct se_lun_acl for %s," " mapped_lun: %u\n", nacl->initiatorname, deve->mapped_lun); spin_unlock(&lun->lun_acl_lock); @@ -121,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl( struct se_node_acl *acl; list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { - if (!(strcmp(acl->initiatorname, initiatorname))) + if (!strcmp(acl->initiatorname, initiatorname)) return acl; } @@ -140,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl( spin_lock_bh(&tpg->acl_node_lock); list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { - if (!(strcmp(acl->initiatorname, initiatorname)) && - (!(acl->dynamic_node_acl))) { + if (!strcmp(acl->initiatorname, initiatorname) && + !acl->dynamic_node_acl) { spin_unlock_bh(&tpg->acl_node_lock); return acl; } @@ -177,7 +176,7 @@ void core_tpg_add_node_to_devs( * By default in LIO-Target $FABRIC_MOD, * demo_mode_write_protect is ON, or READ_ONLY; */ - if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) { + if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) { if (dev->dev_flags & DF_READ_ONLY) lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; else @@ -193,7 +192,7 @@ void core_tpg_add_node_to_devs( lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; } - printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" + pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" " access for LUN in Demo Mode\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, @@ -216,7 +215,7 @@ static int core_set_queue_depth_for_node( struct se_node_acl *acl) { if (!acl->queue_depth) { - printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," + pr_err("Queue depth for %s Initiator Node: %s is 0," "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); acl->queue_depth = 1; @@ -236,8 +235,8 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl) nacl->device_list = kzalloc(sizeof(struct se_dev_entry) * TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL); - if (!(nacl->device_list)) { - printk(KERN_ERR "Unable to allocate memory for" + if (!nacl->device_list) { + pr_err("Unable to allocate memory for" " struct se_node_acl->device_list\n"); return -ENOMEM; } @@ -265,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( struct se_node_acl *acl; acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); - if ((acl)) + if (acl) return acl; - if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))) + if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) return NULL; acl = 
tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg); - if (!(acl)) + if (!acl) return NULL; INIT_LIST_HEAD(&acl->acl_list); @@ -307,7 +306,7 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( tpg->num_node_acls++; spin_unlock_bh(&tpg->acl_node_lock); - printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" + pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, tpg->se_tpg_tfo->get_fabric_name(), initiatorname); @@ -357,10 +356,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( spin_lock_bh(&tpg->acl_node_lock); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); - if ((acl)) { + if (acl) { if (acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; - printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" + pr_debug("%s_TPG[%u] - Replacing dynamic ACL" " for %s\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); spin_unlock_bh(&tpg->acl_node_lock); @@ -375,7 +374,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( goto done; } - printk(KERN_ERR "ACL entry for %s Initiator" + pr_err("ACL entry for %s Initiator" " Node %s already exists for TPG %u, ignoring" " request.\n", tpg->se_tpg_tfo->get_fabric_name(), initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); @@ -384,8 +383,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( } spin_unlock_bh(&tpg->acl_node_lock); - if (!(se_nacl)) { - printk("struct se_node_acl pointer is NULL\n"); + if (!se_nacl) { + pr_err("struct se_node_acl pointer is NULL\n"); return ERR_PTR(-EINVAL); } /* @@ -425,7 +424,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl( spin_unlock_bh(&tpg->acl_node_lock); done: - printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" + pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, tpg->se_tpg_tfo->get_fabric_name(), initiatorname); @@ -463,7 +462,7 @@ int core_tpg_del_initiator_node_acl( /* * Determine if the session needs to be closed by our context. */ - if (!(tpg->se_tpg_tfo->shutdown_session(sess))) + if (!tpg->se_tpg_tfo->shutdown_session(sess)) continue; spin_unlock_bh(&tpg->session_lock); @@ -481,7 +480,7 @@ int core_tpg_del_initiator_node_acl( core_clear_initiator_node_from_tpg(acl, tpg); core_free_device_list_for_node(acl, tpg); - printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" + pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth, tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname); @@ -506,8 +505,8 @@ int core_tpg_set_initiator_node_queue_depth( spin_lock_bh(&tpg->acl_node_lock); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); - if (!(acl)) { - printk(KERN_ERR "Access Control List entry for %s Initiator" + if (!acl) { + pr_err("Access Control List entry for %s Initiator" " Node %s does not exist for TPG %hu, ignoring" " request.\n", tpg->se_tpg_tfo->get_fabric_name(), initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); @@ -526,7 +525,7 @@ int core_tpg_set_initiator_node_queue_depth( continue; if (!force) { - printk(KERN_ERR "Unable to change queue depth for %s" + pr_err("Unable to change queue depth for %s" " Initiator Node: %s while session is" " operational.
To forcefully change the queue" " depth and force session reinstatement" @@ -543,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth( /* * Determine if the session needs to be closed by our context. */ - if (!(tpg->se_tpg_tfo->shutdown_session(sess))) + if (!tpg->se_tpg_tfo->shutdown_session(sess)) continue; init_sess = sess; @@ -586,7 +585,7 @@ int core_tpg_set_initiator_node_queue_depth( if (init_sess) tpg->se_tpg_tfo->close_session(init_sess); - printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" + pr_debug("Successfuly changed queue depth to: %d for Initiator" " Node: %s on %s Target Portal Group: %u\n", queue_depth, initiatorname, tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg)); @@ -644,8 +643,8 @@ int core_tpg_register( se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) * TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL); - if (!(se_tpg->tpg_lun_list)) { - printk(KERN_ERR "Unable to allocate struct se_portal_group->" + if (!se_tpg->tpg_lun_list) { + pr_err("Unable to allocate struct se_portal_group->" "tpg_lun_list\n"); return -ENOMEM; } @@ -686,7 +685,7 @@ int core_tpg_register( list_add_tail(&se_tpg->se_tpg_node, &tpg_list); spin_unlock_bh(&tpg_lock); - printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" + pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for" " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ? @@ -700,7 +699,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) { struct se_node_acl *nacl, *nacl_tmp; - printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" + pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group" " for endpoint: %s Portal Tag %u\n", (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 
"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(), @@ -749,7 +748,7 @@ struct se_lun *core_tpg_pre_addlun( struct se_lun *lun; if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" "-1: %u for Target Portal Group: %u\n", tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, @@ -760,7 +759,7 @@ struct se_lun *core_tpg_pre_addlun( spin_lock(&tpg->tpg_lun_lock); lun = &tpg->tpg_lun_list[unpacked_lun]; if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { - printk(KERN_ERR "TPG Logical Unit Number: %u is already active" + pr_err("TPG Logical Unit Number: %u is already active" " on %s Target Portal Group: %u, ignoring request.\n", unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(), tpg->se_tpg_tfo->tpg_get_tag(tpg)); @@ -808,7 +807,7 @@ struct se_lun *core_tpg_pre_dellun( struct se_lun *lun; if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { - printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" + pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" "-1: %u for Target Portal Group: %u\n", tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, @@ -819,7 +818,7 @@ struct se_lun *core_tpg_pre_dellun( spin_lock(&tpg->tpg_lun_lock); lun = &tpg->tpg_lun_list[unpacked_lun]; if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { - printk(KERN_ERR "%s Logical Unit Number: %u is not active on" + pr_err("%s Logical Unit Number: %u is not active on" " Target Portal Group: %u, ignoring request.\n", tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, tpg->se_tpg_tfo->tpg_get_tag(tpg)); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index c743d94baf77..55b6588904a4 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -58,132 +58,6 @@ #include "target_core_scdb.h" #include "target_core_ua.h" -/* #define DEBUG_CDB_HANDLER */ -#ifdef DEBUG_CDB_HANDLER -#define DEBUG_CDB_H(x...) printk(KERN_INFO x) -#else -#define DEBUG_CDB_H(x...) -#endif - -/* #define DEBUG_CMD_MAP */ -#ifdef DEBUG_CMD_MAP -#define DEBUG_CMD_M(x...) printk(KERN_INFO x) -#else -#define DEBUG_CMD_M(x...) -#endif - -/* #define DEBUG_MEM_ALLOC */ -#ifdef DEBUG_MEM_ALLOC -#define DEBUG_MEM(x...) printk(KERN_INFO x) -#else -#define DEBUG_MEM(x...) -#endif - -/* #define DEBUG_MEM2_ALLOC */ -#ifdef DEBUG_MEM2_ALLOC -#define DEBUG_MEM2(x...) printk(KERN_INFO x) -#else -#define DEBUG_MEM2(x...) -#endif - -/* #define DEBUG_SG_CALC */ -#ifdef DEBUG_SG_CALC -#define DEBUG_SC(x...) printk(KERN_INFO x) -#else -#define DEBUG_SC(x...) -#endif - -/* #define DEBUG_SE_OBJ */ -#ifdef DEBUG_SE_OBJ -#define DEBUG_SO(x...) printk(KERN_INFO x) -#else -#define DEBUG_SO(x...) -#endif - -/* #define DEBUG_CMD_VOL */ -#ifdef DEBUG_CMD_VOL -#define DEBUG_VOL(x...) printk(KERN_INFO x) -#else -#define DEBUG_VOL(x...) -#endif - -/* #define DEBUG_CMD_STOP */ -#ifdef DEBUG_CMD_STOP -#define DEBUG_CS(x...) printk(KERN_INFO x) -#else -#define DEBUG_CS(x...) -#endif - -/* #define DEBUG_PASSTHROUGH */ -#ifdef DEBUG_PASSTHROUGH -#define DEBUG_PT(x...) printk(KERN_INFO x) -#else -#define DEBUG_PT(x...) -#endif - -/* #define DEBUG_TASK_STOP */ -#ifdef DEBUG_TASK_STOP -#define DEBUG_TS(x...) printk(KERN_INFO x) -#else -#define DEBUG_TS(x...) -#endif - -/* #define DEBUG_TRANSPORT_STOP */ -#ifdef DEBUG_TRANSPORT_STOP -#define DEBUG_TRANSPORT_S(x...) 
printk(KERN_INFO x) -#else -#define DEBUG_TRANSPORT_S(x...) -#endif - -/* #define DEBUG_TASK_FAILURE */ -#ifdef DEBUG_TASK_FAILURE -#define DEBUG_TF(x...) printk(KERN_INFO x) -#else -#define DEBUG_TF(x...) -#endif - -/* #define DEBUG_DEV_OFFLINE */ -#ifdef DEBUG_DEV_OFFLINE -#define DEBUG_DO(x...) printk(KERN_INFO x) -#else -#define DEBUG_DO(x...) -#endif - -/* #define DEBUG_TASK_STATE */ -#ifdef DEBUG_TASK_STATE -#define DEBUG_TSTATE(x...) printk(KERN_INFO x) -#else -#define DEBUG_TSTATE(x...) -#endif - -/* #define DEBUG_STATUS_THR */ -#ifdef DEBUG_STATUS_THR -#define DEBUG_ST(x...) printk(KERN_INFO x) -#else -#define DEBUG_ST(x...) -#endif - -/* #define DEBUG_TASK_TIMEOUT */ -#ifdef DEBUG_TASK_TIMEOUT -#define DEBUG_TT(x...) printk(KERN_INFO x) -#else -#define DEBUG_TT(x...) -#endif - -/* #define DEBUG_GENERIC_REQUEST_FAILURE */ -#ifdef DEBUG_GENERIC_REQUEST_FAILURE -#define DEBUG_GRF(x...) printk(KERN_INFO x) -#else -#define DEBUG_GRF(x...) -#endif - -/* #define DEBUG_SAM_TASK_ATTRS */ -#ifdef DEBUG_SAM_TASK_ATTRS -#define DEBUG_STA(x...) printk(KERN_INFO x) -#else -#define DEBUG_STA(x...) -#endif - static int sub_api_initialized; static struct kmem_cache *se_cmd_cache; @@ -225,62 +99,62 @@ int init_se_kmem_caches(void) { se_cmd_cache = kmem_cache_create("se_cmd_cache", sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL); - if (!(se_cmd_cache)) { - printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n"); + if (!se_cmd_cache) { + pr_err("kmem_cache_create for struct se_cmd failed\n"); goto out; } se_tmr_req_cache = kmem_cache_create("se_tmr_cache", sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req), 0, NULL); - if (!(se_tmr_req_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req" + if (!se_tmr_req_cache) { + pr_err("kmem_cache_create() for struct se_tmr_req" " failed\n"); goto out; } se_sess_cache = kmem_cache_create("se_sess_cache", sizeof(struct se_session), __alignof__(struct se_session), 0, NULL); - if (!(se_sess_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_session" + if (!se_sess_cache) { + pr_err("kmem_cache_create() for struct se_session" " failed\n"); goto out; } se_ua_cache = kmem_cache_create("se_ua_cache", sizeof(struct se_ua), __alignof__(struct se_ua), 0, NULL); - if (!(se_ua_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n"); + if (!se_ua_cache) { + pr_err("kmem_cache_create() for struct se_ua failed\n"); goto out; } t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", sizeof(struct t10_pr_registration), __alignof__(struct t10_pr_registration), 0, NULL); - if (!(t10_pr_reg_cache)) { - printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration" + if (!t10_pr_reg_cache) { + pr_err("kmem_cache_create() for struct t10_pr_registration" " failed\n"); goto out; } t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 0, NULL); - if (!(t10_alua_lu_gp_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache" + if (!t10_alua_lu_gp_cache) { + pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" " failed\n"); goto out; } t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", sizeof(struct t10_alua_lu_gp_member), __alignof__(struct t10_alua_lu_gp_member), 0, NULL); - if (!(t10_alua_lu_gp_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_" + if (!t10_alua_lu_gp_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" "cache failed\n"); goto 
out; } t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", sizeof(struct t10_alua_tg_pt_gp), __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); - if (!(t10_alua_tg_pt_gp_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" + if (!t10_alua_tg_pt_gp_cache) { + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "cache failed\n"); goto out; } @@ -289,8 +163,8 @@ int init_se_kmem_caches(void) sizeof(struct t10_alua_tg_pt_gp_member), __alignof__(struct t10_alua_tg_pt_gp_member), 0, NULL); - if (!(t10_alua_tg_pt_gp_mem_cache)) { - printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_" + if (!t10_alua_tg_pt_gp_mem_cache) { + pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "mem_t failed\n"); goto out; } @@ -366,19 +240,19 @@ static int transport_subsystem_reqmods(void) ret = request_module("target_core_iblock"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_iblock\n"); + pr_err("Unable to load target_core_iblock\n"); ret = request_module("target_core_file"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_file\n"); + pr_err("Unable to load target_core_file\n"); ret = request_module("target_core_pscsi"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_pscsi\n"); + pr_err("Unable to load target_core_pscsi\n"); ret = request_module("target_core_stgt"); if (ret != 0) - printk(KERN_ERR "Unable to load target_core_stgt\n"); + pr_err("Unable to load target_core_stgt\n"); return 0; } @@ -405,8 +279,8 @@ struct se_session *transport_init_session(void) struct se_session *se_sess; se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); - if (!(se_sess)) { - printk(KERN_ERR "Unable to allocate struct se_session from" + if (!se_sess) { + pr_err("Unable to allocate struct se_session from" " se_sess_cache\n"); return ERR_PTR(-ENOMEM); } @@ -460,7 +334,7 @@ void __transport_register_session( } list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); - printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", + pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr); } EXPORT_SYMBOL(__transport_register_session); @@ -485,7 +359,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess) * Used by struct se_node_acl's under ConfigFS to locate active struct se_session */ se_nacl = se_sess->se_node_acl; - if ((se_nacl)) { + if (se_nacl) { spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); list_del(&se_sess->sess_acl_list); /* @@ -516,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess) struct se_portal_group *se_tpg = se_sess->se_tpg; struct se_node_acl *se_nacl; - if (!(se_tpg)) { + if (!se_tpg) { transport_free_session(se_sess); return; } @@ -532,11 +406,11 @@ void transport_deregister_session(struct se_session *se_sess) * struct se_node_acl if it had been previously dynamically generated. 
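A note on the mechanical conversion running through all of these hunks: printk(KERN_ERR ...) becomes pr_err(), printk(KERN_WARNING ...) becomes pr_warn(), and the KERN_INFO chatter is deliberately demoted to pr_debug(), which compiles to nothing unless DEBUG or dynamic debug is enabled. A minimal sketch of the equivalence follows; the pr_fmt prefix is shown only as the usual idiom, not something this diff itself adds.

/* Sketch: what the pr_* helpers expand to, approximately. */
#define pr_fmt(fmt) "demo: " fmt	/* optional per-file prefix */
#include <linux/kernel.h>

static void demo_log(int err)
{
	if (err)
		pr_err("failed: %d\n", err);	/* printk(KERN_ERR pr_fmt(...)) */
	else
		pr_debug("ok\n");	/* no-op unless DEBUG or dyndbg */
}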
*/ se_nacl = se_sess->se_node_acl; - if ((se_nacl)) { + if (se_nacl) { spin_lock_bh(&se_tpg->acl_node_lock); if (se_nacl->dynamic_node_acl) { - if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( - se_tpg))) { + if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache( + se_tpg)) { list_del(&se_nacl->acl_list); se_tpg->num_node_acls--; spin_unlock_bh(&se_tpg->acl_node_lock); @@ -553,7 +427,7 @@ void transport_deregister_session(struct se_session *se_sess) transport_free_session(se_sess); - printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n", + pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", se_tpg->se_tpg_tfo->get_fabric_name()); } EXPORT_SYMBOL(transport_deregister_session); @@ -569,19 +443,19 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd) list_for_each_entry(task, &cmd->t_task_list, t_list) { dev = task->se_dev; - if (!(dev)) + if (!dev) continue; if (atomic_read(&task->task_active)) continue; - if (!(atomic_read(&task->task_state_active))) + if (!atomic_read(&task->task_state_active)) continue; spin_lock_irqsave(&dev->execute_task_lock, flags); list_del(&task->t_state_list); - DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n", - cmd->se_tfo->tfo_get_task_tag(cmd), dev, task); + pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n", + cmd->se_tfo->get_task_tag(cmd), dev, task); spin_unlock_irqrestore(&dev->execute_task_lock, flags); atomic_set(&task->task_state_active, 0); @@ -610,7 +484,7 @@ static int transport_cmd_check_stop( * command for LUN shutdown purposes. */ if (atomic_read(&cmd->transport_lun_stop)) { - DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)" + pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)" " == TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); @@ -629,7 +503,7 @@ static int transport_cmd_check_stop( * this command for frontend exceptions. 
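transport_cmd_check_stop() in this region is one half of a stop handshake: a shutdown path raises an atomic flag and sleeps on a completion, and the command's normal path signals the completion when it notices the flag. A stripped-down sketch of that shape, with invented names (not the driver's exact fields):

/* Sketch: atomic stop-flag plus completion handshake. */
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/types.h>

struct demo_stop_cmd {
	atomic_t		stop_requested;
	struct completion	stop_done;
};

static bool demo_check_stop(struct demo_stop_cmd *cmd)
{
	if (atomic_read(&cmd->stop_requested)) {
		/* The shutdown path is blocked on stop_done. */
		complete(&cmd->stop_done);
		return true;	/* caller must not touch cmd further */
	}
	return false;
}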
*/ if (atomic_read(&cmd->t_transport_stop)) { - DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) ==" + pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) ==" " TRUE for ITT: 0x%08x\n", __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd)); @@ -695,7 +569,7 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) return; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (!(atomic_read(&cmd->transport_dev_active))) { + if (!atomic_read(&cmd->transport_dev_active)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto check_lun; } @@ -710,7 +584,7 @@ check_lun: list_del(&cmd->se_lun_node); atomic_set(&cmd->transport_lun_active, 0); #if 0 - printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n" + pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n" cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun); #endif } @@ -797,7 +671,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, unsigned long flags; spin_lock_irqsave(&qobj->cmd_queue_lock, flags); - if (!(atomic_read(&cmd->t_transport_queue_active))) { + if (!atomic_read(&cmd->t_transport_queue_active)) { spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); return; } @@ -812,7 +686,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd, spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); if (atomic_read(&cmd->t_transport_queue_active)) { - printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n", + pr_err("ITT: 0x%08x t_transport_queue_active: %d\n", cmd->se_tfo->get_task_tag(cmd), atomic_read(&cmd->t_transport_queue_active)); } @@ -853,7 +727,7 @@ void transport_complete_task(struct se_task *task, int success) int t_state; unsigned long flags; #if 0 - printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task, + pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, cmd->t_task_cdb[0], dev); #endif if (dev) @@ -899,8 +773,8 @@ void transport_complete_task(struct se_task *task, int success) * the processing thread. */ if (atomic_read(&task->task_timeout)) { - if (!(atomic_dec_and_test( - &cmd->t_task_cdbs_timeout_left))) { + if (!atomic_dec_and_test( + &cmd->t_task_cdbs_timeout_left)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; @@ -918,7 +792,7 @@ void transport_complete_task(struct se_task *task, int success) * struct se_task from struct se_cmd will complete itself into the * device queue depending upon int success. 
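The t_task_cdbs_left accounting the next hunk touches is a last-one-out pattern: every completing struct se_task decrements the counter, and only the task that takes it to zero moves the command forward. Roughly, with illustrative names rather than the exact driver API:

/* Sketch: last-one-out completion accounting, as in
 * transport_complete_task(). */
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_cmd {
	atomic_t	tasks_left;	/* set to task count at submit */
	spinlock_t	lock;
};

static void demo_task_done(struct demo_cmd *cmd, bool success)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->lock, flags);
	if (!atomic_dec_and_test(&cmd->tasks_left)) {
		/* Other tasks still in flight; nothing more to do. */
		spin_unlock_irqrestore(&cmd->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cmd->lock, flags);
	/* Last task out: queue the command for status/processing. */
}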
*/ - if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { + if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { if (!success) cmd->t_tasks_failed = 1; @@ -976,9 +850,9 @@ static inline int transport_add_task_check_sam_attr( &task_prev->t_execute_list : &dev->execute_task_list); - DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x" + pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x" " in execution queue\n", - T_TASK(task->task_se_cmd)->t_task_cdb[0]); + task->task_se_cmd->t_task_cdb[0]); return 1; } /* @@ -1020,7 +894,7 @@ static void __transport_add_task_to_execute_queue( atomic_set(&task->task_state_active, 1); - DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd), task, dev); } @@ -1042,8 +916,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd) list_add_tail(&task->t_state_list, &dev->state_task_list); atomic_set(&task->task_state_active, 1); - DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n", - task->se_cmd->se_tfo->get_task_tag( + pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n", + task->task_se_cmd->se_tfo->get_task_tag( task->task_se_cmd), task, dev); spin_unlock(&dev->execute_task_lock); @@ -1112,7 +986,7 @@ static void target_qf_do_work(struct work_struct *work) smp_mb__after_atomic_dec(); spin_unlock_irq(&dev->qf_cmd_lock); - printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue" + pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" @@ -1197,7 +1071,7 @@ static void transport_release_all_cmds(struct se_device *dev) spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); - printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u," + pr_err("Releasing ITT: 0x%08x, i_state: %u," " t_state: %u directly\n", cmd->se_tfo->get_task_tag(cmd), cmd->se_tfo->get_cmd_state(cmd), t_state); @@ -1264,7 +1138,7 @@ void transport_dump_vpd_proto_id( if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk(KERN_INFO "%s", buf); + pr_debug("%s", buf); } void @@ -1314,7 +1188,7 @@ int transport_dump_vpd_assoc( if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk("%s", buf); + pr_debug("%s", buf); return ret; } @@ -1374,7 +1248,7 @@ int transport_dump_vpd_ident_type( return -EINVAL; strncpy(p_buf, buf, p_buf_len); } else { - printk("%s", buf); + pr_debug("%s", buf); } return ret; @@ -1425,7 +1299,7 @@ int transport_dump_vpd_ident( if (p_buf) strncpy(p_buf, buf, p_buf_len); else - printk("%s", buf); + pr_debug("%s", buf); return ret; } @@ -1482,7 +1356,7 @@ static void core_setup_task_attr_emulation(struct se_device *dev) } dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED; - DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" + pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x" " device\n", dev->transport->name, dev->transport->get_device_rev(dev)); } @@ -1494,32 +1368,32 @@ static void scsi_dump_inquiry(struct se_device *dev) /* * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer */ - printk(" Vendor: "); + pr_debug(" Vendor: "); for (i = 0; i < 8; i++) if (wwn->vendor[i] >= 0x20) - printk("%c", wwn->vendor[i]); + pr_debug("%c", wwn->vendor[i]); else - printk(" "); + pr_debug(" "); - printk(" Model: "); + pr_debug(" Model: "); for (i = 0; i < 16; i++) if (wwn->model[i] >= 0x20) - printk("%c", wwn->model[i]); + pr_debug("%c", 
wwn->model[i]); else - printk(" "); + pr_debug(" "); - printk(" Revision: "); + pr_debug(" Revision: "); for (i = 0; i < 4; i++) if (wwn->revision[i] >= 0x20) - printk("%c", wwn->revision[i]); + pr_debug("%c", wwn->revision[i]); else - printk(" "); + pr_debug(" "); - printk("\n"); + pr_debug("\n"); device_type = dev->transport->get_device_type(dev); - printk(" Type: %s ", scsi_device_type(device_type)); - printk(" ANSI SCSI revision: %02x\n", + pr_debug(" Type: %s ", scsi_device_type(device_type)); + pr_debug(" ANSI SCSI revision: %02x\n", dev->transport->get_device_rev(dev)); } @@ -1537,8 +1411,8 @@ struct se_device *transport_add_device_to_core_hba( struct se_device *dev; dev = kzalloc(sizeof(struct se_device), GFP_KERNEL); - if (!(dev)) { - printk(KERN_ERR "Unable to allocate memory for se_dev_t\n"); + if (!dev) { + pr_err("Unable to allocate memory for se_dev_t\n"); return NULL; } @@ -1608,7 +1482,7 @@ struct se_device *transport_add_device_to_core_hba( dev->process_thread = kthread_run(transport_processing_thread, dev, "LIO_%s", dev->transport->name); if (IS_ERR(dev->process_thread)) { - printk(KERN_ERR "Unable to create kthread: LIO_%s\n", + pr_err("Unable to create kthread: LIO_%s\n", dev->transport->name); goto out; } @@ -1626,7 +1500,7 @@ struct se_device *transport_add_device_to_core_hba( */ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { if (!inquiry_prod || !inquiry_rev) { - printk(KERN_ERR "All non TCM/pSCSI plugins require" + pr_err("All non TCM/pSCSI plugins require" " INQUIRY consts\n"); goto out; } @@ -1688,9 +1562,9 @@ transport_generic_get_task(struct se_cmd *cmd, struct se_task *task; struct se_device *dev = cmd->se_dev; - task = dev->transport->alloc_task(cmd); + task = dev->transport->alloc_task(cmd->t_task_cdb); if (!task) { - printk(KERN_ERR "Unable to allocate struct se_task\n"); + pr_err("Unable to allocate struct se_task\n"); return NULL; } @@ -1751,7 +1625,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) return 0; if (cmd->sam_task_attr == MSG_ACA_TAG) { - DEBUG_STA("SAM Task Attribute ACA" + pr_debug("SAM Task Attribute ACA" " emulation is not supported\n"); return -EINVAL; } @@ -1761,9 +1635,9 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd) */ cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id); smp_mb__after_atomic_inc(); - DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", + pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, - TRANSPORT(cmd->se_dev)->name); + cmd->se_dev->transport->name); return 0; } @@ -1804,7 +1678,7 @@ int transport_generic_allocate_tasks( * for VARIABLE_LENGTH_CMD */ if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { - printk(KERN_ERR "Received SCSI CDB with command_size: %d that" + pr_err("Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); return -EINVAL; @@ -1817,8 +1691,8 @@ int transport_generic_allocate_tasks( if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL); - if (!(cmd->t_task_cdb)) { - printk(KERN_ERR "Unable to allocate cmd->t_task_cdb" + if (!cmd->t_task_cdb) { + pr_err("Unable to allocate cmd->t_task_cdb" " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), (unsigned long)sizeof(cmd->__t_task_cdb)); @@ -1864,7 +1738,7 @@ int transport_generic_handle_cdb( { if (!cmd->se_lun) { dump_stack(); - 
printk(KERN_ERR "cmd->se_lun is NULL\n"); + pr_err("cmd->se_lun is NULL\n"); return -EINVAL; } @@ -1882,12 +1756,12 @@ int transport_handle_cdb_direct( { if (!cmd->se_lun) { dump_stack(); - printk(KERN_ERR "cmd->se_lun is NULL\n"); + pr_err("cmd->se_lun is NULL\n"); return -EINVAL; } if (in_interrupt()) { dump_stack(); - printk(KERN_ERR "transport_generic_handle_cdb cannot be called" + pr_err("transport_generic_handle_cdb cannot be called" " from interrupt context\n"); return -EINVAL; } @@ -1906,7 +1780,7 @@ int transport_generic_handle_cdb_map( { if (!cmd->se_lun) { dump_stack(); - printk(KERN_ERR "cmd->se_lun is NULL\n"); + pr_err("cmd->se_lun is NULL\n"); return -EINVAL; } @@ -1975,7 +1849,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) unsigned long flags; int ret = 0; - DEBUG_TS("ITT[0x%08x] - Stopping tasks\n", + pr_debug("ITT[0x%08x] - Stopping tasks\n", cmd->se_tfo->get_task_tag(cmd)); /* @@ -1984,7 +1858,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); list_for_each_entry_safe(task, task_tmp, &cmd->t_task_list, t_list) { - DEBUG_TS("task_no[%d] - Processing task %p\n", + pr_debug("task_no[%d] - Processing task %p\n", task->task_no, task); /* * If the struct se_task has not been sent and is not active, @@ -1997,7 +1871,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) transport_remove_task_from_execute_queue(task, task->se_dev); - DEBUG_TS("task_no[%d] - Removed from execute queue\n", + pr_debug("task_no[%d] - Removed from execute queue\n", task->task_no); spin_lock_irqsave(&cmd->t_state_lock, flags); continue; @@ -2012,10 +1886,10 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); - DEBUG_TS("task_no[%d] - Waiting to complete\n", + pr_debug("task_no[%d] - Waiting to complete\n", task->task_no); wait_for_completion(&task->task_stop_comp); - DEBUG_TS("task_no[%d] - Stopped successfully\n", + pr_debug("task_no[%d] - Stopped successfully\n", task->task_no); spin_lock_irqsave(&cmd->t_state_lock, flags); @@ -2024,7 +1898,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd) atomic_set(&task->task_active, 0); atomic_set(&task->task_stop, 0); } else { - DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no); + pr_debug("task_no[%d] - Did nothing\n", task->task_no); ret++; } @@ -2046,18 +1920,18 @@ static void transport_generic_request_failure( { int ret = 0; - DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" + pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x" " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd), cmd->t_task_cdb[0]); - DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:" + pr_debug("-----[ i_state: %d t_state/def_t_state:" " %d/%d transport_error_status: %d\n", cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state, cmd->transport_error_status); - DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d" + pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d" " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" " t_transport_active: %d t_transport_stop: %d" - " t_transport_sent: %d\n", cmd->t_task_cdbs, + " t_transport_sent: %d\n", cmd->t_task_list_num, atomic_read(&cmd->t_task_cdbs_left), atomic_read(&cmd->t_task_cdbs_sent), atomic_read(&cmd->t_task_cdbs_ex_left), @@ -2146,7 +2020,7 @@ static void transport_generic_request_failure( */ break; default: - printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n", + pr_err("Unknown transport error for CDB 0x%02x: 
%d\n", cmd->t_task_cdb[0], cmd->transport_error_status); cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; @@ -2164,7 +2038,7 @@ static void transport_generic_request_failure( check_stop: transport_lun_remove_cmd(cmd); - if (!(transport_cmd_check_stop_to_fabric(cmd))) + if (!transport_cmd_check_stop_to_fabric(cmd)) ; return; @@ -2178,7 +2052,7 @@ static void transport_direct_request_timeout(struct se_cmd *cmd) unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (!(atomic_read(&cmd->t_transport_timeout))) { + if (!atomic_read(&cmd->t_transport_timeout)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } @@ -2262,7 +2136,7 @@ static void transport_task_timeout_handler(unsigned long data) struct se_cmd *cmd = task->task_se_cmd; unsigned long flags; - DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd); + pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); spin_lock_irqsave(&cmd->t_state_lock, flags); if (task->task_flags & TF_STOP) { @@ -2274,8 +2148,8 @@ static void transport_task_timeout_handler(unsigned long data) /* * Determine if transport_complete_task() has already been called. */ - if (!(atomic_read(&task->task_active))) { - DEBUG_TT("transport task: %p cmd: %p timeout task_active" + if (!atomic_read(&task->task_active)) { + pr_debug("transport task: %p cmd: %p timeout task_active" " == 0\n", task, cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; @@ -2290,20 +2164,20 @@ static void transport_task_timeout_handler(unsigned long data) task->task_scsi_status = 1; if (atomic_read(&task->task_stop)) { - DEBUG_TT("transport task: %p cmd: %p timeout task_stop" + pr_debug("transport task: %p cmd: %p timeout task_stop" " == 1\n", task, cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete(&task->task_stop_comp); return; } - if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) { - DEBUG_TT("transport task: %p cmd: %p timeout non zero" + if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { + pr_debug("transport task: %p cmd: %p timeout non zero" " t_task_cdbs_left\n", task, cmd); spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } - DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", + pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", task, cmd); cmd->t_state = TRANSPORT_COMPLETE_FAILURE; @@ -2326,7 +2200,7 @@ static void transport_start_task_timer(struct se_task *task) * If the task_timeout is disabled, exit now. 
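transport_start_task_timer() below is built on the classic (pre-timer_setup) kernel timer API, and a zero task_timeout attribute disables the timer entirely, which is why the function bails out early. As a reference sketch of that API:

/* Sketch of the legacy timer API used here; modern kernels would
 * use timer_setup()/from_timer() instead. */
#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_timeout_fn(unsigned long data)
{
	/* 'data' carries the object pointer in this legacy API. */
}

static void demo_start_timer(struct timer_list *t, void *obj, int secs)
{
	if (!secs)		/* timeout of 0 means "disabled" */
		return;
	init_timer(t);
	t->expires = jiffies + secs * HZ;
	t->data = (unsigned long)obj;
	t->function = demo_timeout_fn;
	add_timer(t);
}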
*/ timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; - if (!(timeout)) + if (!timeout) return; init_timer(&task->task_timer); @@ -2337,7 +2211,7 @@ static void transport_start_task_timer(struct se_task *task) task->task_flags |= TF_RUNNING; add_timer(&task->task_timer); #if 0 - printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:" + pr_debug("Starting task timer for cmd: %p task: %p seconds:" " %d\n", task->task_se_cmd, task, timeout); #endif } @@ -2349,7 +2223,7 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) { struct se_cmd *cmd = task->task_se_cmd; - if (!(task->task_flags & TF_RUNNING)) + if (!task->task_flags & TF_RUNNING) return; task->task_flags |= TF_STOP; @@ -2404,9 +2278,9 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) if (cmd->sam_task_attr == MSG_HEAD_TAG) { atomic_inc(&cmd->se_dev->dev_hoq_count); smp_mb__after_atomic_inc(); - DEBUG_STA("Added HEAD_OF_QUEUE for CDB:" + pr_debug("Added HEAD_OF_QUEUE for CDB:" " 0x%02x, se_ordered_id: %u\n", - cmd->_task_cdb[0], + cmd->t_task_cdb[0], cmd->se_ordered_id); return 1; } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { @@ -2418,7 +2292,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) atomic_inc(&cmd->se_dev->dev_ordered_sync); smp_mb__after_atomic_inc(); - DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered" + pr_debug("Added ORDERED for CDB: 0x%02x to ordered" " list, se_ordered_id: %u\n", cmd->t_task_cdb[0], cmd->se_ordered_id); @@ -2427,7 +2301,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) * no other older commands exist that need to be * completed first. */ - if (!(atomic_read(&cmd->se_dev->simple_cmds))) + if (!atomic_read(&cmd->se_dev->simple_cmds)) return 1; } else { /* @@ -2452,7 +2326,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd) &cmd->se_dev->delayed_cmd_list); spin_unlock(&cmd->se_dev->delayed_cmd_lock); - DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to" + pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to" " delayed CMD list, se_ordered_id: %u\n", cmd->t_task_cdb[0], cmd->sam_task_attr, cmd->se_ordered_id); @@ -2486,7 +2360,7 @@ static int transport_execute_tasks(struct se_cmd *cmd) * Call transport_cmd_check_stop() to see if a fabric exception * has occurred that prevents execution. */ - if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) { + if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) { /* * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE * attribute for the tasks of the received struct se_cmd CDB @@ -2777,7 +2651,7 @@ static inline u32 transport_get_size( return sectors; } #if 0 - printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for" + pr_debug("Returning block_size: %u, sectors: %u == %u for" " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors, dev->transport->name); @@ -2832,8 +2706,8 @@ static void transport_xor_callback(struct se_cmd *cmd) * 5) transfer the resulting XOR data to the data-in buffer. 
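Step 5 of the XOR emulation listed above reduces to an in-place XOR of the scratch copy into the data-in buffer; the real callback walks scatterlists, but over flat buffers the core of it is just:

/* Sketch: XOR a scratch copy of the payload into the data-in
 * buffer, as transport_xor_callback() does over its sg lists. */
#include <linux/types.h>

static void demo_xor(unsigned char *data_in, const unsigned char *scratch,
		     size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		data_in[i] ^= scratch[i];
}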
*/ buf = kmalloc(cmd->data_length, GFP_KERNEL); - if (!(buf)) { - printk(KERN_ERR "Unable to allocate xor_callback buf\n"); + if (!buf) { + pr_err("Unable to allocate xor_callback buf\n"); return; } /* @@ -2893,18 +2767,18 @@ static int transport_get_sense_data(struct se_cmd *cmd) continue; dev = task->se_dev; - if (!(dev)) + if (!dev) continue; if (!dev->transport->get_sense_buffer) { - printk(KERN_ERR "dev->transport->get_sense_buffer" + pr_err("dev->transport->get_sense_buffer" " is NULL\n"); continue; } sense_buffer = dev->transport->get_sense_buffer(task); - if (!(sense_buffer)) { - printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate" + if (!sense_buffer) { + pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" " sense buffer for task with sense\n", cmd->se_tfo->get_task_tag(cmd), task->task_no); continue; @@ -2921,7 +2795,7 @@ static int transport_get_sense_data(struct se_cmd *cmd) cmd->scsi_sense_length = (TRANSPORT_SENSE_BUFFER + offset); - printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" + pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x" " and sense\n", dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); @@ -2969,13 +2843,12 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd) sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size); - if ((cmd->t_task_lba + sectors) > - transport_dev_end_lba(dev)) { - printk(KERN_ERR "LBA: %llu Sectors: %u exceeds" + if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) { + pr_err("LBA: %llu Sectors: %u exceeds" " transport_dev_end_lba(): %llu\n", cmd->t_task_lba, sectors, transport_dev_end_lba(dev)); - printk(KERN_ERR " We should return CHECK_CONDITION" + pr_err(" We should return CHECK_CONDITION" " but we don't yet\n"); return 0; } @@ -3026,7 +2899,7 @@ static int transport_generic_cmd_sequencer( */ if (ret > 0) { #if 0 - printk(KERN_INFO "[%s]: ALUA TG Port not available," + pr_debug("[%s]: ALUA TG Port not available," " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", cmd->se_tfo->get_fabric_name(), alua_ascq); #endif @@ -3192,10 +3065,13 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; - if (sectors != 0) + if (sectors) size = transport_get_size(sectors, cdb, cmd); - else - size = dev->se_sub_dev->se_dev_attrib.block_size; + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not" + " supported\n"); + goto out_invalid_cdb_field; + } cmd->t_task_lba = get_unaligned_be64(&cdb[12]); cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; @@ -3207,7 +3083,7 @@ static int transport_generic_cmd_sequencer( break; if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { - printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" + pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); goto out_invalid_cdb_field; @@ -3217,13 +3093,13 @@ static int transport_generic_cmd_sequencer( * tpws with the UNMAP=1 bit set. 
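The flag checks in these WRITE_SAME hunks decode the flags byte per SBC-3: PBDATA is bit 2 (0x04), LBDATA is bit 1 (0x02), UNMAP is bit 3 (0x08), and the block-discard emulation only accepts UNMAP=1 with both data bits clear. A sketch of that predicate (helper name is illustrative):

/* Sketch: WRITE SAME flags-byte check used for discard emulation. */
#include <linux/types.h>

static bool demo_write_same_ok_for_discard(unsigned char flags)
{
	if ((flags & 0x04) || (flags & 0x02))	/* PBDATA / LBDATA */
		return false;			/* not supported */
	return flags & 0x08;			/* require UNMAP=1 */
}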
*/ if (!(cdb[10] & 0x08)) { - printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not" + pr_err("WRITE_SAME w/o UNMAP bit not" " supported for Block Discard Emulation\n"); goto out_invalid_cdb_field; } break; default: - printk(KERN_ERR "VARIABLE_LENGTH_CMD service action" + pr_err("VARIABLE_LENGTH_CMD service action" " 0x%04x not supported\n", service_action); goto out_unsupported_cdb; } @@ -3469,10 +3345,12 @@ static int transport_generic_cmd_sequencer( if (sector_ret) goto out_unsupported_cdb; - if (sectors != 0) + if (sectors) size = transport_get_size(sectors, cdb, cmd); - else - size = dev->se_sub_dev->se_dev_attrib.block_size; + else { + pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); + goto out_invalid_cdb_field; + } cmd->t_task_lba = get_unaligned_be16(&cdb[2]); passthrough = (dev->transport->transport_type == @@ -3484,9 +3362,9 @@ static int transport_generic_cmd_sequencer( * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and * TCM/FILEIO subsystem plugin backstores. */ - if (!(passthrough)) { + if (!passthrough) { if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { - printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA" + pr_err("WRITE_SAME PBDATA and LBDATA" " bits not supported for Block Discard" " Emulation\n"); goto out_invalid_cdb_field; @@ -3496,7 +3374,7 @@ static int transport_generic_cmd_sequencer( * tpws with the UNMAP=1 bit set. */ if (!(cdb[1] & 0x08)) { - printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not " + pr_err("WRITE_SAME w/o UNMAP bit not " " supported for Block Discard Emulation\n"); goto out_invalid_cdb_field; } @@ -3532,7 +3410,7 @@ static int transport_generic_cmd_sequencer( cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; break; default: - printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode" + pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode" " 0x%02x, sending CHECK_CONDITION.\n", cmd->se_tfo->get_fabric_name(), cdb[0]); cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks; @@ -3540,7 +3418,7 @@ static int transport_generic_cmd_sequencer( } if (size != cmd->data_length) { - printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:" + pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" " %u does not match SCSI CDB Length: %u for SAM Opcode:" " 0x%02x\n", cmd->se_tfo->get_fabric_name(), cmd->data_length, size, cdb[0]); @@ -3548,7 +3426,7 @@ static int transport_generic_cmd_sequencer( cmd->cmd_spdtl = size; if (cmd->data_direction == DMA_TO_DEVICE) { - printk(KERN_ERR "Rejecting underflow/overflow" + pr_err("Rejecting underflow/overflow" " WRITE data\n"); goto out_invalid_cdb_field; } @@ -3556,8 +3434,8 @@ static int transport_generic_cmd_sequencer( * Reject READ_* or WRITE_* with overflow/underflow for * type SCF_SCSI_DATA_SG_IO_CDB. 
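The size != cmd->data_length comparison further down distinguishes the transfer length implied by the CDB from what the fabric actually allocated. A sketch of the residual classification, with the accept/reject policy left to the caller (directions shown follow the usual SAM convention and are illustrative):

/* Sketch: underflow vs. overflow between CDB-derived size and the
 * fabric-allocated data_length. */
#include <linux/types.h>

static int demo_check_residual(u32 cdb_size, u32 data_length, u32 *residual)
{
	if (cdb_size == data_length) {
		*residual = 0;
		return 0;
	}
	if (cdb_size > data_length)	/* overflow: CDB wants more */
		*residual = cdb_size - data_length;
	else				/* underflow: CDB wants less */
		*residual = data_length - cdb_size;
	return 1;			/* caller decides accept/reject */
}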
*/ - if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { - printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op" + if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) { + pr_err("Failing OVERFLOW/UNDERFLOW for LBA op" " CDB on non 512-byte sector setup subsystem" " plugin: %s\n", dev->transport->name); /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ @@ -3607,14 +3485,14 @@ static void transport_complete_task_attr(struct se_cmd *cmd) atomic_dec(&dev->simple_cmds); smp_mb__after_atomic_dec(); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for" + pr_debug("Incremented dev->dev_cur_ordered_id: %u for" " SIMPLE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_HEAD_TAG) { atomic_dec(&dev->dev_hoq_count); smp_mb__after_atomic_dec(); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev_cur_ordered_id: %u for" + pr_debug("Incremented dev_cur_ordered_id: %u for" " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { @@ -3625,7 +3503,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) spin_unlock(&dev->ordered_cmd_lock); dev->dev_cur_ordered_id++; - DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:" + pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } /* @@ -3640,10 +3518,10 @@ static void transport_complete_task_attr(struct se_cmd *cmd) list_del(&cmd_p->se_delayed_node); spin_unlock(&dev->delayed_cmd_lock); - DEBUG_STA("Calling add_tasks() for" + pr_debug("Calling add_tasks() for" " cmd_p: 0x%02x Task Attr: 0x%02x" " Dormant -> Active, se_ordered_id: %u\n", - T_TASK(cmd_p)->t_task_cdb[0], + cmd_p->t_task_cdb[0], cmd_p->sam_task_attr, cmd_p->se_ordered_id); transport_add_tasks_from_cmd(cmd_p); @@ -3812,7 +3690,7 @@ done: return; queue_full: - printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p," + pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," " data_direction: %d\n", cmd, cmd->data_direction); transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); } @@ -3837,49 +3715,34 @@ static void transport_free_dev_tasks(struct se_cmd *cmd) if (task->se_dev) task->se_dev->transport->free_task(task); else - printk(KERN_ERR "task[%u] - task->se_dev is NULL\n", + pr_err("task[%u] - task->se_dev is NULL\n", task->task_no); spin_lock_irqsave(&cmd->t_state_lock, flags); } spin_unlock_irqrestore(&cmd->t_state_lock, flags); } -static inline void transport_free_pages(struct se_cmd *cmd) +static inline void transport_free_sgl(struct scatterlist *sgl, int nents) { struct scatterlist *sg; - int free_page = 1; int count; - if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) - free_page = 0; - if (cmd->se_dev->transport->do_se_mem_map) - free_page = 0; + for_each_sg(sgl, sg, nents, count) + __free_page(sg_page(sg)); - for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) { - /* - * Only called if - * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, - */ - if (free_page) - __free_page(sg_page(sg)); + kfree(sgl); +} - } - if (free_page) - kfree(cmd->t_data_sg); +static inline void transport_free_pages(struct se_cmd *cmd) +{ + if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) + return; + + transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents); cmd->t_data_sg = NULL; cmd->t_data_nents = 0; - for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) { - /* - * Only called if - * 
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use, - */ - if (free_page) - __free_page(sg_page(sg)); - - } - if (free_page) - kfree(cmd->t_bidi_data_sg); + transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = NULL; cmd->t_bidi_data_nents = 0; } @@ -3895,7 +3758,7 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) spin_lock_irqsave(&cmd->t_state_lock, flags); if (atomic_read(&cmd->t_fe_count)) { - if (!(atomic_dec_and_test(&cmd->t_fe_count))) { + if (!atomic_dec_and_test(&cmd->t_fe_count)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 1; @@ -3903,7 +3766,7 @@ static inline int transport_dec_and_check(struct se_cmd *cmd) } if (atomic_read(&cmd->t_se_count)) { - if (!(atomic_dec_and_test(&cmd->t_se_count))) { + if (!atomic_dec_and_test(&cmd->t_se_count)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 1; @@ -3922,7 +3785,7 @@ static void transport_release_fe_cmd(struct se_cmd *cmd) return; spin_lock_irqsave(&cmd->t_state_lock, flags); - if (!(atomic_read(&cmd->transport_dev_active))) { + if (!atomic_read(&cmd->transport_dev_active)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto free_pages; } @@ -3953,7 +3816,7 @@ transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) } spin_lock_irqsave(&cmd->t_state_lock, flags); - if (!(atomic_read(&cmd->transport_dev_active))) { + if (!atomic_read(&cmd->transport_dev_active)) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); goto free_pages; } @@ -4027,7 +3890,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) DMA_FROM_DEVICE, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); - if (!rc) { + if (rc <= 0) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -4046,7 +3909,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd) cmd->data_direction, cmd->t_data_sg, cmd->t_data_nents); - if (!task_cdbs) { + if (task_cdbs <= 0) { cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -4094,12 +3957,6 @@ transport_generic_get_mem(struct se_cmd *cmd) struct page *page; int i = 0; - /* - * If the device uses memory mapping this is enough. 
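transport_generic_get_mem() in this region builds cmd->t_data_sg one zeroed page at a time; the same shape, stripped of the command plumbing and with invented names:

/* Sketch: building a scatterlist page by page. */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *demo_alloc_sgl(u32 length, unsigned int *nents)
{
	struct scatterlist *sgl;
	struct page *page;
	int i, n = DIV_ROUND_UP(length, PAGE_SIZE);

	sgl = kmalloc(sizeof(*sgl) * n, GFP_KERNEL);
	if (!sgl)
		return NULL;
	sg_init_table(sgl, n);

	for (i = 0; i < n; i++) {
		u32 chunk = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto err;
		sg_set_page(&sgl[i], page, chunk, 0);
		length -= chunk;
	}
	*nents = n;
	return sgl;
err:
	while (i--)
		__free_page(sg_page(&sgl[i]));
	kfree(sgl);
	return NULL;
}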
- */ - if (cmd->se_dev->transport->do_se_mem_map) - return 0; - nents = DIV_ROUND_UP(length, PAGE_SIZE); cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL); if (!cmd->t_data_sg) @@ -4176,14 +4033,14 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) if (!sg_first) { sg_first = task->task_sg; - chained_nents = task->task_sg_num; + chained_nents = task->task_sg_nents; } else { sg_chain(sg_prev, sg_prev_nents, task->task_sg); - chained_nents += task->task_sg_num; + chained_nents += task->task_sg_nents; } sg_prev = task->task_sg; - sg_prev_nents = task->task_sg_num; + sg_prev_nents = task->task_sg_nents; } /* * Setup the starting pointer and total t_tasks_sg_linked_no including @@ -4192,19 +4049,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd) cmd->t_tasks_sg_chained = sg_first; cmd->t_tasks_sg_chained_no = chained_nents; - DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" + pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and" " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained, cmd->t_tasks_sg_chained_no); for_each_sg(cmd->t_tasks_sg_chained, sg, cmd->t_tasks_sg_chained_no, i) { - DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n", + pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n", i, sg, sg_page(sg), sg->length, sg->offset); if (sg_is_chain(sg)) - DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg); + pr_debug("SG: %p sg_is_chain=1\n", sg); if (sg_is_last(sg)) - DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg); + pr_debug("SG: %p sg_is_last=1\n", sg); } } EXPORT_SYMBOL(transport_do_task_sg_chain); @@ -4266,25 +4123,25 @@ static int transport_allocate_data_tasks( * It's so much easier and only a waste when task_count > 1. * That is extremely rare. */ - task->task_sg_num = sgl_nents; + task->task_sg_nents = sgl_nents; if (cmd->se_tfo->task_sg_chaining) { - task->task_sg_num++; + task->task_sg_nents++; task->task_padded_sg = 1; } task->task_sg = kmalloc(sizeof(struct scatterlist) * \ - task->task_sg_num, GFP_KERNEL); + task->task_sg_nents, GFP_KERNEL); if (!task->task_sg) { cmd->se_dev->transport->free_task(task); return -ENOMEM; } - sg_init_table(task->task_sg, task->task_sg_num); + sg_init_table(task->task_sg, task->task_sg_nents); task_size = task->task_size; /* Build new sgl, only up to task_size */ - for_each_sg(task->task_sg, sg, task->task_sg_num, count) { + for_each_sg(task->task_sg, sg, task->task_sg_nents, count) { if (cmd_sg->length > task_size) break; @@ -4311,6 +4168,7 @@ transport_allocate_control_task(struct se_cmd *cmd) unsigned char *cdb; struct se_task *task; unsigned long flags; + int ret = 0; task = transport_generic_get_task(cmd, cmd->data_direction); if (!task) @@ -4331,7 +4189,7 @@ transport_allocate_control_task(struct se_cmd *cmd) memcpy(task->task_sg, cmd->t_data_sg, sizeof(struct scatterlist) * cmd->t_data_nents); task->task_size = cmd->data_length; - task->task_sg_num = cmd->t_data_nents; + task->task_sg_nents = cmd->t_data_nents; spin_lock_irqsave(&cmd->t_state_lock, flags); list_add_tail(&task->t_list, &cmd->t_task_list); @@ -4339,16 +4197,19 @@ transport_allocate_control_task(struct se_cmd *cmd) if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { if (dev->transport->map_task_SG) - return dev->transport->map_task_SG(task); - return 0; + ret = dev->transport->map_task_SG(task); } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { if (dev->transport->cdb_none) - return dev->transport->cdb_none(task); - return 0; + ret = dev->transport->cdb_none(task); } else { + pr_err("target: Unknown control cmd type!\n"); 
BUG(); - return -ENOMEM; } + + /* Success! Return number of tasks allocated */ + if (ret == 0) + return 1; + return ret; } static u32 transport_allocate_tasks( @@ -4358,18 +4219,12 @@ static u32 transport_allocate_tasks( struct scatterlist *sgl, unsigned int sgl_nents) { - int ret; - - if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { + if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) return transport_allocate_data_tasks(cmd, lba, data_direction, sgl, sgl_nents); - } else { - ret = transport_allocate_control_task(cmd); - if (ret < 0) - return ret; - else - return 1; - } + else + return transport_allocate_control_task(cmd); + } @@ -4441,64 +4296,6 @@ EXPORT_SYMBOL(transport_generic_new_cmd); */ void transport_generic_process_write(struct se_cmd *cmd) { -#if 0 - /* - * Copy SCSI Presented DTL sector(s) from received buffers allocated to - * original EDTL - */ - if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { - if (!cmd->t_tasks_se_num) { - unsigned char *dst, *buf = - (unsigned char *)cmd->t_task_buf; - - dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL); - if (!(dst)) { - printk(KERN_ERR "Unable to allocate memory for" - " WRITE underflow\n"); - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - return; - } - memcpy(dst, buf, cmd->cmd_spdtl); - - kfree(cmd->t_task_buf); - cmd->t_task_buf = dst; - } else { - struct scatterlist *sg = - (struct scatterlist *sg)cmd->t_task_buf; - struct scatterlist *orig_sg; - - orig_sg = kzalloc(sizeof(struct scatterlist) * - cmd->t_tasks_se_num, - GFP_KERNEL))) { - if (!(orig_sg)) { - printk(KERN_ERR "Unable to allocate memory" - " for WRITE underflow\n"); - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - return; - } - - memcpy(orig_sg, cmd->t_task_buf, - sizeof(struct scatterlist) * - cmd->t_tasks_se_num); - - cmd->data_length = cmd->cmd_spdtl; - /* - * FIXME, clear out original struct se_task and state - * information. 
- */ - if (transport_generic_new_cmd(cmd) < 0) { - transport_generic_request_failure(cmd, NULL, - PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1); - kfree(orig_sg); - return; - } - - transport_memcpy_write_sg(cmd, orig_sg); - } - } -#endif transport_execute_tasks(cmd); } EXPORT_SYMBOL(transport_generic_process_write); @@ -4554,7 +4351,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd) return PYX_TRANSPORT_WRITE_PENDING; queue_full: - printk(KERN_INFO "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); + pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); cmd->t_state = TRANSPORT_COMPLETE_QF_WP; transport_handle_queue_full(cmd, cmd->se_dev, transport_write_pending_qf); @@ -4586,7 +4383,7 @@ void transport_generic_free_cmd( if (cmd->se_lun) { #if 0 - printk(KERN_INFO "cmd: %p ITT: 0x%08x contains" + pr_debug("cmd: %p ITT: 0x%08x contains" " cmd->se_lun\n", cmd, cmd->se_tfo->get_task_tag(cmd)); #endif @@ -4627,7 +4424,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) spin_lock_irqsave(&cmd->t_state_lock, flags); if (atomic_read(&cmd->t_transport_stop)) { atomic_set(&cmd->transport_lun_stop, 0); - DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop ==" + pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop ==" " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd)); spin_unlock_irqrestore(&cmd->t_state_lock, flags); transport_cmd_check_stop(cmd, 1, 0); @@ -4640,13 +4437,13 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) ret = transport_stop_tasks_for_cmd(cmd); - DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:" - " %d\n", cmd, cmd->t_task_cdbs, ret); + pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:" + " %d\n", cmd, cmd->t_task_list_num, ret); if (!ret) { - DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n", + pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n", cmd->se_tfo->get_task_tag(cmd)); wait_for_completion(&cmd->transport_lun_stop_comp); - DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n", + pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n", cmd->se_tfo->get_task_tag(cmd)); } transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); @@ -4654,13 +4451,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun) return 0; } -/* #define DEBUG_CLEAR_LUN */ -#ifdef DEBUG_CLEAR_LUN -#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x) -#else -#define DEBUG_CLEAR_L(x...) -#endif - static void __transport_clear_lun_from_sessions(struct se_lun *lun) { struct se_cmd *cmd = NULL; @@ -4682,7 +4472,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) * progress for the iscsi_cmd_t. */ spin_lock(&cmd->t_state_lock); - DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport" + pr_debug("SE_LUN[%d] - Setting cmd->transport" "_lun_stop for ITT: 0x%08x\n", cmd->se_lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); @@ -4691,8 +4481,8 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags); - if (!(cmd->se_lun)) { - printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n", + if (!cmd->se_lun) { + pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n", cmd->se_tfo->get_task_tag(cmd), cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); BUG(); @@ -4701,7 +4491,7 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) * If the Storage engine still owns the iscsi_cmd_t, determine * and/or stop its context. 
*/ - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport" + pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport" "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); @@ -4710,13 +4500,13 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun) continue; } - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun" + pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun" "_wait_for_tasks(): SUCCESS\n", cmd->se_lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); - if (!(atomic_read(&cmd->transport_dev_active))) { + if (!atomic_read(&cmd->transport_dev_active)) { spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); goto check_cond; } @@ -4741,7 +4531,7 @@ check_cond: */ spin_lock_irqsave(&cmd->t_state_lock, cmd_flags); if (atomic_read(&cmd->transport_lun_fe_stop)) { - DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for" + pr_debug("SE_LUN[%d] - Detected FE stop for" " struct se_cmd: %p ITT: 0x%08x\n", lun->unpacked_lun, cmd, cmd->se_tfo->get_task_tag(cmd)); @@ -4753,7 +4543,7 @@ check_cond: spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags); continue; } - DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n", + pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n", lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd)); spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags); @@ -4779,7 +4569,7 @@ int transport_clear_lun_from_sessions(struct se_lun *lun) kt = kthread_run(transport_clear_lun_thread, lun, "tcm_cl_%u", lun->unpacked_lun); if (IS_ERR(kt)) { - printk(KERN_ERR "Unable to start clear_lun thread\n"); + pr_err("Unable to start clear_lun thread\n"); return PTR_ERR(kt); } wait_for_completion(&lun->lun_shutdown_comp); @@ -4812,7 +4602,7 @@ static void transport_generic_wait_for_tasks( */ if (atomic_read(&cmd->transport_lun_stop)) { - DEBUG_TRANSPORT_S("wait_for_tasks: Stopping" + pr_debug("wait_for_tasks: Stopping" " wait_for_completion(&cmd->t_tasktransport_lun_fe" "_stop_comp); for ITT: 0x%08x\n", cmd->se_tfo->get_task_tag(cmd)); @@ -4834,7 +4624,7 @@ static void transport_generic_wait_for_tasks( * struct se_cmd, now owns the structure and can be released through * normal means below. 
*/ - DEBUG_TRANSPORT_S("wait_for_tasks: Stopped" + pr_debug("wait_for_tasks: Stopped" " wait_for_completion(&cmd->t_tasktransport_lun_fe_" "stop_comp); for ITT: 0x%08x\n", cmd->se_tfo->get_task_tag(cmd)); @@ -4847,7 +4637,7 @@ static void transport_generic_wait_for_tasks( atomic_set(&cmd->t_transport_stop, 1); - DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x" + pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" " i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, @@ -4863,7 +4653,7 @@ static void transport_generic_wait_for_tasks( atomic_set(&cmd->t_transport_active, 0); atomic_set(&cmd->t_transport_stop, 0); - DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion(" + pr_debug("wait_for_tasks: Stopped wait_for_compltion(" "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n", cmd->se_tfo->get_task_tag(cmd)); remove: @@ -5071,11 +4861,11 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status) int ret = 0; if (atomic_read(&cmd->t_transport_aborted) != 0) { - if (!(send_status) || + if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS)) return 1; #if 0 - printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED" + pr_debug("Sending delayed SAM_STAT_TASK_ABORTED" " status for CDB: 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); @@ -5107,7 +4897,7 @@ void transport_send_task_abort(struct se_cmd *cmd) } cmd->scsi_status = SAM_STAT_TASK_ABORTED; #if 0 - printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," + pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x," " ITT: 0x%08x\n", cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd)); #endif @@ -5145,7 +4935,7 @@ int transport_generic_do_tmr(struct se_cmd *cmd) tmr->response = TMR_FUNCTION_REJECTED; break; default: - printk(KERN_ERR "Uknown TMR function: 0x%02x.\n", + pr_err("Uknown TMR function: 0x%02x.\n", tmr->function); tmr->response = TMR_FUNCTION_REJECTED; break; @@ -5190,7 +4980,7 @@ static void transport_processing_shutdown(struct se_device *dev) spin_lock_irqsave(&dev->execute_task_lock, flags); while ((task = transport_get_task_from_state_list(dev))) { if (!task->task_se_cmd) { - printk(KERN_ERR "task->task_se_cmd is NULL!\n"); + pr_err("task->task_se_cmd is NULL!\n"); continue; } cmd = task->task_se_cmd; @@ -5199,18 +4989,18 @@ static void transport_processing_shutdown(struct se_device *dev) spin_lock_irqsave(&cmd->t_state_lock, flags); - DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x," - " i_state/def_i_state: %d/%d, t_state/def_t_state:" + pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," + " i_state: %d, t_state/def_t_state:" " %d/%d cdb: 0x%02x\n", cmd, task, - cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn, - cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state, + cmd->se_tfo->get_task_tag(cmd), + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, cmd->deferred_t_state, cmd->t_task_cdb[0]); - DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:" + pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" " %d t_task_cdbs_sent: %d -- t_transport_active: %d" " t_transport_stop: %d t_transport_sent: %d\n", cmd->se_tfo->get_task_tag(cmd), - cmd->t_task_cdbs, + cmd->t_task_list_num, atomic_read(&cmd->t_task_cdbs_left), atomic_read(&cmd->t_task_cdbs_sent), atomic_read(&cmd->t_transport_active), @@ -5222,10 +5012,10 @@ static void transport_processing_shutdown(struct se_device *dev) spin_unlock_irqrestore( &cmd->t_state_lock, flags); 
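Both transport_clear_lun_from_sessions() above and the per-task waits in this shutdown path follow the same kthread-plus-completion shape: spawn a worker, let it drain outstanding work, and sleep on a completion it signals when done. A compact sketch under invented names:

/* Sketch: offload shutdown to a kthread and wait for it. */
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>

struct demo_lun {
	struct completion shutdown_comp;
};

static int demo_clear_thread(void *arg)
{
	struct demo_lun *lun = arg;

	/* ... drain and stop outstanding commands here ... */
	complete(&lun->shutdown_comp);
	return 0;
}

static int demo_clear_lun(struct demo_lun *lun)
{
	struct task_struct *kt;

	init_completion(&lun->shutdown_comp);
	kt = kthread_run(demo_clear_thread, lun, "demo_cl");
	if (IS_ERR(kt))
		return PTR_ERR(kt);
	wait_for_completion(&lun->shutdown_comp);
	return 0;
}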
- DEBUG_DO("Waiting for task: %p to shutdown for dev:" + pr_debug("Waiting for task: %p to shutdown for dev:" " %p\n", task, dev); wait_for_completion(&task->task_stop_comp); - DEBUG_DO("Completed task: %p shutdown for dev: %p\n", + pr_debug("Completed task: %p shutdown for dev: %p\n", task, dev); spin_lock_irqsave(&cmd->t_state_lock, flags); @@ -5239,11 +5029,11 @@ static void transport_processing_shutdown(struct se_device *dev) } __transport_stop_task_timer(task, &flags); - if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) { + if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { spin_unlock_irqrestore( &cmd->t_state_lock, flags); - DEBUG_DO("Skipping task: %p, dev: %p for" + pr_debug("Skipping task: %p, dev: %p for" " t_task_cdbs_ex_left: %d\n", task, dev, atomic_read(&cmd->t_task_cdbs_ex_left)); @@ -5252,7 +5042,7 @@ static void transport_processing_shutdown(struct se_device *dev) } if (atomic_read(&cmd->t_transport_active)) { - DEBUG_DO("got t_transport_active = 1 for task: %p, dev:" + pr_debug("got t_transport_active = 1 for task: %p, dev:" " %p\n", task, dev); if (atomic_read(&cmd->t_fe_count)) { @@ -5282,7 +5072,7 @@ static void transport_processing_shutdown(struct se_device *dev) spin_lock_irqsave(&dev->execute_task_lock, flags); continue; } - DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n", + pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", task, dev); if (atomic_read(&cmd->t_fe_count)) { @@ -5315,7 +5105,7 @@ static void transport_processing_shutdown(struct se_device *dev) */ while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { - DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n", + pr_debug("From Device Queue: cmd: %p t_state: %d\n", cmd, cmd->t_state); if (atomic_read(&cmd->t_fe_count)) { @@ -5368,8 +5158,8 @@ get_cmd: switch (cmd->t_state) { case TRANSPORT_NEW_CMD_MAP: - if (!(cmd->se_tfo->new_cmd_map)) { - printk(KERN_ERR "cmd->se_tfo->new_cmd_map is" + if (!cmd->se_tfo->new_cmd_map) { + pr_err("cmd->se_tfo->new_cmd_map is" " NULL for TRANSPORT_NEW_CMD_MAP\n"); BUG(); } @@ -5420,7 +5210,7 @@ get_cmd: transport_generic_write_pending(cmd); break; default: - printk(KERN_ERR "Unknown t_state: %d deferred_t_state:" + pr_err("Unknown t_state: %d deferred_t_state:" " %d for ITT: 0x%08x i_state: %d on SE LUN:" " %u\n", cmd->t_state, cmd->deferred_t_state, cmd->se_tfo->get_task_tag(cmd), diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index d28e9c4a1c99..31e3c652527e 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -49,15 +49,15 @@ int core_scsi3_ua_check( struct se_session *sess = cmd->se_sess; struct se_node_acl *nacl; - if (!(sess)) + if (!sess) return 0; nacl = sess->se_node_acl; - if (!(nacl)) + if (!nacl) return 0; deve = &nacl->device_list[cmd->orig_fe_lun]; - if (!(atomic_read(&deve->ua_count))) + if (!atomic_read(&deve->ua_count)) return 0; /* * From sam4r14, section 5.14 Unit attention condition: @@ -97,12 +97,12 @@ int core_scsi3_ua_allocate( /* * PASSTHROUGH OPS */ - if (!(nacl)) + if (!nacl) return -EINVAL; ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC); - if (!(ua)) { - printk(KERN_ERR "Unable to allocate struct se_ua\n"); + if (!ua) { + pr_err("Unable to allocate struct se_ua\n"); return -ENOMEM; } INIT_LIST_HEAD(&ua->ua_dev_list); @@ -177,7 +177,7 @@ int core_scsi3_ua_allocate( spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); - printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:" + pr_debug("[%s]: Allocated 
UNIT ATTENTION, mapped LUN: %u, ASC:" " 0x%02x, ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, asc, ascq); @@ -215,16 +215,16 @@ void core_scsi3_ua_for_check_condition( struct se_ua *ua = NULL, *ua_p; int head = 1; - if (!(sess)) + if (!sess) return; nacl = sess->se_node_acl; - if (!(nacl)) + if (!nacl) return; spin_lock_irq(&nacl->device_list_lock); deve = &nacl->device_list[cmd->orig_fe_lun]; - if (!(atomic_read(&deve->ua_count))) { + if (!atomic_read(&deve->ua_count)) { spin_unlock_irq(&nacl->device_list_lock); return; } @@ -264,7 +264,7 @@ void core_scsi3_ua_for_check_condition( spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); - printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with" + pr_debug("[%s]: %s UNIT ATTENTION condition with" " INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x" " reported ASC: 0x%02x, ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), @@ -284,16 +284,16 @@ int core_scsi3_ua_clear_for_request_sense( struct se_ua *ua = NULL, *ua_p; int head = 1; - if (!(sess)) + if (!sess) return -EINVAL; nacl = sess->se_node_acl; - if (!(nacl)) + if (!nacl) return -EINVAL; spin_lock_irq(&nacl->device_list_lock); deve = &nacl->device_list[cmd->orig_fe_lun]; - if (!(atomic_read(&deve->ua_count))) { + if (!atomic_read(&deve->ua_count)) { spin_unlock_irq(&nacl->device_list_lock); return -EPERM; } @@ -323,7 +323,7 @@ int core_scsi3_ua_clear_for_request_sense( spin_unlock(&deve->ua_lock); spin_unlock_irq(&nacl->device_list_lock); - printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped" + pr_debug("[%s]: Released UNIT ATTENTION condition, mapped" " LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x," " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(), cmd->orig_fe_lun, *asc, *ascq); diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index 8d26779e440c..f7fff7ed63c3 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -23,30 +23,6 @@ #define FT_TPG_NAMELEN 32 /* max length of TPG name */ #define FT_LUN_NAMELEN 32 /* max length of LUN name */ -/* - * Debug options. - */ -#define FT_DEBUG_CONF 0x01 /* configuration messages */ -#define FT_DEBUG_SESS 0x02 /* session messages */ -#define FT_DEBUG_TM 0x04 /* TM operations */ -#define FT_DEBUG_IO 0x08 /* I/O commands */ -#define FT_DEBUG_DATA 0x10 /* Data transfer */ - -extern unsigned int ft_debug_logging; /* debug options */ - -#define FT_DEBUG(mask, fmt, args...) \ - do { \ - if (ft_debug_logging & (mask)) \ - printk(KERN_INFO "tcm_fc: %s: " fmt, \ - __func__, ##args); \ - } while (0) - -#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args) -#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args) -#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args) -#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args) -#define FT_DATA_DBG(fmt, args...) 
FT_DEBUG(FT_DEBUG_DATA, fmt, ##args) - struct ft_transport_id { __u8 format; __u8 __resvd1[7]; diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 9365e53947ad..a9e9a31da11d 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -62,22 +62,19 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) struct scatterlist *sg; int count; - if (!(ft_debug_logging & FT_DEBUG_IO)) - return; - se_cmd = &cmd->se_cmd; - printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n", + pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); - printk(KERN_INFO "%s: cmd %p cdb %p\n", + pr_debug("%s: cmd %p cdb %p\n", caller, cmd, cmd->cdb); - printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun); + pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); - printk(KERN_INFO "%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", + pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n", caller, cmd, se_cmd->t_data_nents, se_cmd->data_length, se_cmd->se_cmd_flags); for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count) - printk(KERN_INFO "%s: cmd %p sg %p page %p " + pr_debug("%s: cmd %p sg %p page %p " "len 0x%x off 0x%x\n", caller, cmd, sg, sg_page(sg), sg->length, sg->offset); @@ -85,7 +82,7 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) sp = cmd->seq; if (sp) { ep = fc_seq_exch(sp); - printk(KERN_INFO "%s: cmd %p sid %x did %x " + pr_debug("%s: cmd %p sid %x did %x " "ox_id %x rx_id %x seq_id %x e_stat %x\n", caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid, sp->id, ep->esb_stat); @@ -321,7 +318,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg) case FC_RCTL_DD_SOL_CTL: /* transfer ready */ case FC_RCTL_DD_DATA_DESC: /* transfer ready */ default: - printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", + pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); fc_frame_free(fp); transport_generic_free_cmd(&cmd->se_cmd, 0, 0); @@ -346,7 +343,7 @@ static void ft_send_resp_status(struct fc_lport *lport, struct fcp_resp_rsp_info *info; fh = fc_frame_header_get(rx_fp); - FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n", + pr_debug("FCP error response: did %x oxid %x status %x code %x\n", ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code); len = sizeof(*fcp); if (status == SAM_STAT_GOOD) @@ -416,15 +413,15 @@ static void ft_send_tm(struct ft_cmd *cmd) * FCP4r01 indicates having a combination of * tm_flags set is invalid. 
*/ - FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); + pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags); ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID); return; } - FT_TM_DBG("alloc tm cmd fn %d\n", tm_func); + pr_debug("alloc tm cmd fn %d\n", tm_func); tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); if (!tmr) { - FT_TM_DBG("alloc failed\n"); + pr_debug("alloc failed\n"); ft_send_resp_code(cmd, FCP_TMF_FAILED); return; } @@ -439,7 +436,7 @@ static void ft_send_tm(struct ft_cmd *cmd) * since "unable to handle TMR request because failed * to get to LUN" */ - FT_TM_DBG("Failed to get LUN for TMR func %d, " + pr_debug("Failed to get LUN for TMR func %d, " "se_cmd %p, unpacked_lun %d\n", tm_func, &cmd->se_cmd, cmd->lun); ft_dump_cmd(cmd, __func__); @@ -490,7 +487,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd) code = FCP_TMF_FAILED; break; } - FT_TM_DBG("tmr fn %d resp %d fcp code %d\n", + pr_debug("tmr fn %d resp %d fcp code %d\n", tmr->function, tmr->response, code); ft_send_resp_code(cmd, code); return 0; @@ -518,7 +515,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) return; busy: - FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n"); + pr_debug("cmd or seq allocation failure - sending BUSY\n"); ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0); fc_frame_free(fp); ft_sess_put(sess); /* undo get from lookup */ @@ -543,7 +540,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp) case FC_RCTL_DD_DATA_DESC: /* transfer ready */ case FC_RCTL_ELS4_REQ: /* SRR, perhaps */ default: - printk(KERN_INFO "%s: unhandled frame r_ctl %x\n", + pr_debug("%s: unhandled frame r_ctl %x\n", __func__, fh->fh_r_ctl); fc_frame_free(fp); ft_sess_put(sess); /* undo get from lookup */ @@ -642,7 +639,7 @@ static void ft_send_cmd(struct ft_cmd *cmd) ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb); - FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); + pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret); ft_dump_cmd(cmd, __func__); if (ret == -ENOMEM) { @@ -672,7 +669,7 @@ err: */ static void ft_exec_req(struct ft_cmd *cmd) { - FT_IO_DBG("cmd state %x\n", cmd->state); + pr_debug("cmd state %x\n", cmd->state); switch (cmd->state) { case FC_CMD_ST_NEW: ft_send_cmd(cmd); diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index ec9e40dc4514..d63e3dd3b180 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -106,7 +106,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict) } err = 4; fail: - FT_CONF_DBG("err %u len %zu pos %u byte %u\n", + pr_debug("err %u len %zu pos %u byte %u\n", err, cp - name, pos, byte); return -1; } @@ -216,7 +216,7 @@ static struct se_node_acl *ft_add_acl( u64 wwpn; u32 q_depth; - FT_CONF_DBG("add acl %s\n", name); + pr_debug("add acl %s\n", name); tpg = container_of(se_tpg, struct ft_tpg, se_tpg); if (ft_parse_wwn(name, &wwpn, 1) < 0) @@ -239,11 +239,11 @@ static void ft_del_acl(struct se_node_acl *se_acl) struct ft_node_acl *acl = container_of(se_acl, struct ft_node_acl, se_node_acl); - FT_CONF_DBG("del acl %s\n", + pr_debug("del acl %s\n", config_item_name(&se_acl->acl_group.cg_item)); tpg = container_of(se_tpg, struct ft_tpg, se_tpg); - FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n", + pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n", acl, se_acl, tpg, &tpg->se_tpg); core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1); @@ -260,11 +260,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct 
fc_rport_priv *rdata) spin_lock_bh(&se_tpg->acl_node_lock); list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) { acl = container_of(se_acl, struct ft_node_acl, se_node_acl); - FT_CONF_DBG("acl %p port_name %llx\n", + pr_debug("acl %p port_name %llx\n", acl, (unsigned long long)acl->node_auth.port_name); if (acl->node_auth.port_name == rdata->ids.port_name || acl->node_auth.node_name == rdata->ids.node_name) { - FT_CONF_DBG("acl %p port_name %llx matched\n", acl, + pr_debug("acl %p port_name %llx matched\n", acl, (unsigned long long)rdata->ids.port_name); found = acl; /* XXX need to hold onto ACL */ @@ -281,10 +281,10 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg) acl = kzalloc(sizeof(*acl), GFP_KERNEL); if (!acl) { - printk(KERN_ERR "Unable to allocate struct ft_node_acl\n"); + pr_err("Unable to allocate struct ft_node_acl\n"); return NULL; } - FT_CONF_DBG("acl %p\n", acl); + pr_debug("acl %p\n", acl); return &acl->se_node_acl; } @@ -294,7 +294,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg, struct ft_node_acl *acl = container_of(se_acl, struct ft_node_acl, se_node_acl); - FT_CONF_DBG(KERN_INFO "acl %p\n", acl); + pr_debug("acl %p\n", acl); kfree(acl); } @@ -311,7 +311,7 @@ static struct se_portal_group *ft_add_tpg( unsigned long index; int ret; - FT_CONF_DBG("tcm_fc: add tpg %s\n", name); + pr_debug("tcm_fc: add tpg %s\n", name); /* * Name must be "tpgt_" followed by the index. @@ -354,7 +354,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg) { struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg); - FT_CONF_DBG("del tpg %s\n", + pr_debug("del tpg %s\n", config_item_name(&tpg->se_tpg.tpg_group.cg_item)); kthread_stop(tpg->thread); @@ -412,7 +412,7 @@ static struct se_wwn *ft_add_lport( struct ft_lport_acl *old_lacl; u64 wwpn; - FT_CONF_DBG("add lport %s\n", name); + pr_debug("add lport %s\n", name); if (ft_parse_wwn(name, &wwpn, 1) < 0) return NULL; lacl = kzalloc(sizeof(*lacl), GFP_KERNEL); @@ -441,7 +441,7 @@ static void ft_del_lport(struct se_wwn *wwn) struct ft_lport_acl *lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn); - FT_CONF_DBG("del lport %s\n", + pr_debug("del lport %s\n", config_item_name(&wwn->wwn_group.cg_item)); mutex_lock(&ft_lport_lock); list_del(&lacl->list); @@ -581,7 +581,7 @@ int ft_register_configfs(void) */ fabric = target_fabric_configfs_init(THIS_MODULE, "fc"); if (IS_ERR(fabric)) { - printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n", + pr_err("%s: target_fabric_configfs_init() failed!\n", __func__); return PTR_ERR(fabric); } @@ -608,11 +608,8 @@ int ft_register_configfs(void) */ ret = target_fabric_configfs_register(fabric); if (ret < 0) { - FT_CONF_DBG("target_fabric_configfs_register() for" + pr_debug("target_fabric_configfs_register() for" " FC Target failed!\n"); - printk(KERN_INFO - "%s: target_fabric_configfs_register() failed!\n", - __func__); target_fabric_configfs_free(fabric); return -1; } diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index 3563a9029c4a..11e6483fc127 100644 --- a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -176,8 +177,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd) error = lport->tt.seq_send(lport, cmd->seq, fp); if (error) { /* XXX For now, initiator will retry */ - if (printk_ratelimit()) - printk(KERN_ERR "%s: Failed to send frame %p, " + pr_err_ratelimited("%s: Failed to send frame 
%p, " "xid <0x%x>, remaining %zu, " "lso_max <0x%x>\n", __func__, fp, ep->xid, @@ -222,7 +222,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) */ buf = fc_frame_payload_get(fp, 1); if (cmd->was_ddp_setup && buf) { - printk(KERN_INFO "%s: When DDP was setup, not expected to" + pr_debug("%s: When DDP was setup, not expected to" "receive frame with payload, Payload shall be" "copied directly to buffer instead of coming " "via. legacy receive queues\n", __func__); @@ -260,7 +260,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp) * this point, but just in case if required in future * for debugging or any other purpose */ - printk(KERN_ERR "%s: Received frame with TSI bit not" + pr_err("%s: Received frame with TSI bit not" " being SET, dropping the frame, " "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n", __func__, cmd->sg, cmd->sg_cnt); diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index 7491e21cc6ae..fbcbb3d1d06b 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -198,13 +198,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id) if (sess->port_id == port_id) { kref_get(&sess->kref); rcu_read_unlock(); - FT_SESS_DBG("port_id %x found %p\n", port_id, sess); + pr_debug("port_id %x found %p\n", port_id, sess); return sess; } } out: rcu_read_unlock(); - FT_SESS_DBG("port_id %x not found\n", port_id); + pr_debug("port_id %x not found\n", port_id); return NULL; } @@ -240,7 +240,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, hlist_add_head_rcu(&sess->hash, head); tport->sess_count++; - FT_SESS_DBG("port_id %x sess %p\n", port_id, sess); + pr_debug("port_id %x sess %p\n", port_id, sess); transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl, sess->se_sess, sess); @@ -314,7 +314,7 @@ int ft_sess_shutdown(struct se_session *se_sess) { struct ft_sess *sess = se_sess->fabric_sess_ptr; - FT_SESS_DBG("port_id %x\n", sess->port_id); + pr_debug("port_id %x\n", sess->port_id); return 1; } @@ -335,7 +335,7 @@ void ft_sess_close(struct se_session *se_sess) mutex_unlock(&ft_lport_lock); return; } - FT_SESS_DBG("port_id %x\n", port_id); + pr_debug("port_id %x\n", port_id); ft_sess_unhash(sess); mutex_unlock(&ft_lport_lock); transport_deregister_session_configfs(se_sess); @@ -348,7 +348,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep) { struct ft_sess *sess = se_sess->fabric_sess_ptr; - FT_SESS_DBG("port_id %x\n", sess->port_id); + pr_debug("port_id %x\n", sess->port_id); } int ft_sess_logged_in(struct se_session *se_sess) @@ -458,7 +458,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len, mutex_lock(&ft_lport_lock); ret = ft_prli_locked(rdata, spp_len, rspp, spp); mutex_unlock(&ft_lport_lock); - FT_SESS_DBG("port_id %x flags %x ret %x\n", + pr_debug("port_id %x flags %x ret %x\n", rdata->ids.port_id, rspp ? 
rspp->spp_flags : 0, ret); return ret; } @@ -518,11 +518,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp) struct ft_sess *sess; u32 sid = fc_frame_sid(fp); - FT_SESS_DBG("sid %x\n", sid); + pr_debug("sid %x\n", sid); sess = ft_sess_get(lport, sid); if (!sess) { - FT_SESS_DBG("sid %x sess lookup failed\n", sid); + pr_debug("sid %x sess lookup failed\n", sid); /* TBD XXX - if FCP_CMND, send PRLO */ fc_frame_free(fp); return; diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 465f266a0205..8204c76ca12b 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -403,7 +403,7 @@ struct se_queue_obj { struct se_task { unsigned char task_sense; struct scatterlist *task_sg; - u32 task_sg_num; + u32 task_sg_nents; struct scatterlist *task_sg_bidi; u8 task_scsi_status; u8 task_flags; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 123df9238c68..740957655a64 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -292,7 +292,7 @@ struct se_subsystem_api { * drivers. Provided out of convenience. */ int (*transport_complete)(struct se_task *task); - struct se_task *(*alloc_task)(struct se_cmd *); + struct se_task *(*alloc_task)(unsigned char *cdb); /* * do_task(): */ @@ -341,11 +341,6 @@ struct se_subsystem_api { * Get the sector_t from a subsystem backstore.. */ sector_t (*get_blocks)(struct se_device *); - /* - * do_se_mem_map(): - */ - int (*do_se_mem_map)(struct se_task *, struct list_head *, void *, - struct se_mem *, struct se_mem **, u32 *, u32 *); /* * get_sense_buffer(): */ -- cgit v1.2.3 From 1d20bb6147954d4fbd337a3d1b40c7eeae254cd7 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 21 Jul 2011 04:41:48 +0000 Subject: target: ->map_task_SG conversion to ->map_control_SG and ->map_data_SG This patch breaks up the ->map_task_SG() backend call into two separate ->map_control_SG() and ->map_data_SG() in order to better address IBLOCK and pSCSI. IBLOCK only allocates bios for ->map_data_SG(), and pSCSI will allocate a struct request for both cases.
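Illustrative aside, not part of this commit: the sketch below models the callback split described above in plain, compilable userspace C. struct backend_ops stands in for the kernel's struct se_subsystem_api, and the se_task_model/demo_* names are invented placeholders; only the shape of the two function pointers mirrors the patch.

/*
 * Hedged model of the ->map_task_SG() split: an IBLOCK-like backend
 * wires only ->map_data_SG(), while a pSCSI-like backend points both
 * callbacks at one helper, as pscsi_map_SG() does in the hunks below.
 */
#include <stdio.h>

struct se_task_model {
	const char *desc;	/* stand-in for the real struct se_task state */
};

struct backend_ops {
	int (*map_control_SG)(struct se_task_model *task);
	int (*map_data_SG)(struct se_task_model *task);
};

static int demo_map_data_SG(struct se_task_model *task)
{
	printf("data-path SG setup for %s\n", task->desc);
	return 0;
}

static int demo_map_SG(struct se_task_model *task)
{
	printf("shared SG setup for %s\n", task->desc);
	return 0;
}

static const struct backend_ops iblock_like = {
	.map_data_SG	= demo_map_data_SG,	/* no control-path hook needed */
};

static const struct backend_ops pscsi_like = {
	.map_control_SG	= demo_map_SG,		/* one helper serves both, */
	.map_data_SG	= demo_map_SG,		/* like pscsi_map_SG() below */
};

int main(void)
{
	struct se_task_model task = { .desc = "demo task" };

	/* Core-side dispatch: data CDBs and control CDBs take different hooks. */
	if (iblock_like.map_data_SG)
		iblock_like.map_data_SG(&task);
	if (pscsi_like.map_control_SG)
		pscsi_like.map_control_SG(&task);
	return 0;
}

The payoff of the split, per the message above, is that a bio-based backend never sees control CDBs it cannot map, while pSCSI keeps building a struct request for both cases.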
This patch fixes incorrect usage of ->map_task_SG() for all se_cmd descriptors in transport_generic_new_cmd() by moving the call into its proper location directly inside of transport_allocate_data_tasks(). Reported-by: Christoph Hellwig Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_iblock.c | 4 +-- drivers/target/target_core_pscsi.c | 14 +++++++---- drivers/target/target_core_transport.c | 45 +++++++++++++++++++--------------- include/target/target_core_transport.h | 8 ++++-- 4 files changed, 42 insertions(+), 29 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 251fc66a8212..7e1234105442 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -591,7 +591,7 @@ static struct bio *iblock_get_bio( return bio; } -static int iblock_map_task_SG(struct se_task *task) +static int iblock_map_data_SG(struct se_task *task) { struct se_cmd *cmd = task->task_se_cmd; struct se_device *dev = cmd->se_dev; @@ -755,7 +755,7 @@ static struct se_subsystem_api iblock_template = { .name = "iblock", .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, - .map_task_SG = iblock_map_task_SG, + .map_data_SG = iblock_map_data_SG, .attach_hba = iblock_attach_hba, .detach_hba = iblock_detach_hba, .allocate_virtdevice = iblock_allocate_virtdevice, diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index a2ce5998d318..2b7b0da9146d 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -1049,7 +1049,7 @@ static inline struct bio *pscsi_get_bio(int sg_num) return bio; } -static int __pscsi_map_task_SG( +static int __pscsi_map_SG( struct se_task *task, struct scatterlist *task_sg, u32 task_sg_num, @@ -1198,7 +1198,10 @@ fail: return ret; } -static int pscsi_map_task_SG(struct se_task *task) +/* + * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call. + */ +static int pscsi_map_SG(struct se_task *task) { int ret; @@ -1206,13 +1209,13 @@ static int pscsi_map_task_SG(struct se_task *task) * Setup the main struct request for the task->task_sg[] payload */ - ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_nents, 0); + ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0); if (ret >= 0 && task->task_sg_bidi) { /* * If present, set up the extra BIDI-COMMAND SCSI READ * struct request and payload.
*/ - ret = __pscsi_map_task_SG(task, task->task_sg_bidi, + ret = __pscsi_map_SG(task, task->task_sg_bidi, task->task_sg_nents, 1); } @@ -1340,7 +1343,8 @@ static struct se_subsystem_api pscsi_template = { .owner = THIS_MODULE, .transport_type = TRANSPORT_PLUGIN_PHBA_PDEV, .cdb_none = pscsi_CDB_none, - .map_task_SG = pscsi_map_task_SG, + .map_control_SG = pscsi_map_SG, + .map_data_SG = pscsi_map_SG, .attach_hba = pscsi_attach_hba, .detach_hba = pscsi_detach_hba, .pmode_enable_hba = pscsi_pmode_enable_hba, diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 55b6588904a4..007cfc164f5e 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -4081,8 +4081,7 @@ static int transport_allocate_data_tasks( struct se_device *dev = cmd->se_dev; unsigned long flags; sector_t sectors; - int task_count; - int i; + int task_count, i, ret; sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; struct scatterlist *sg; @@ -4129,7 +4128,7 @@ static int transport_allocate_data_tasks( task->task_padded_sg = 1; } - task->task_sg = kmalloc(sizeof(struct scatterlist) * \ + task->task_sg = kmalloc(sizeof(struct scatterlist) * task->task_sg_nents, GFP_KERNEL); if (!task->task_sg) { cmd->se_dev->transport->free_task(task); @@ -4157,6 +4156,20 @@ static int transport_allocate_data_tasks( list_add_tail(&task->t_list, &cmd->t_task_list); spin_unlock_irqrestore(&cmd->t_state_lock, flags); } + /* + * Now perform the memory map of task->task_sg[] into backend + * subsystem memory.. + */ + list_for_each_entry(task, &cmd->t_task_list, t_list) { + if (atomic_read(&task->task_sent)) + continue; + if (!dev->transport->map_data_SG) + continue; + + ret = dev->transport->map_data_SG(task); + if (ret < 0) + return 0; + } return task_count; } @@ -4196,8 +4209,8 @@ transport_allocate_control_task(struct se_cmd *cmd) spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { - if (dev->transport->map_task_SG) - ret = dev->transport->map_task_SG(task); + if (dev->transport->map_control_SG) + ret = dev->transport->map_control_SG(task); } else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { if (dev->transport->cdb_none) ret = dev->transport->cdb_none(task); @@ -4239,8 +4252,6 @@ static u32 transport_allocate_tasks( */ int transport_generic_new_cmd(struct se_cmd *cmd) { - struct se_task *task; - struct se_device *dev = cmd->se_dev; int ret = 0; /* @@ -4254,22 +4265,16 @@ int transport_generic_new_cmd(struct se_cmd *cmd) if (ret < 0) return ret; } - + /* + * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for + * control or data CDB types, and perform the map to backend subsystem + * code from SGL memory allocated here by transport_generic_get_mem(), or + * via pre-existing SGL memory setup explictly by fabric module code with + * transport_generic_map_mem_to_cmd(). + */ ret = transport_new_cmd_obj(cmd); if (ret < 0) return ret; - - list_for_each_entry(task, &cmd->t_task_list, t_list) { - if (atomic_read(&task->task_sent)) - continue; - if (!dev->transport->map_task_SG) - continue; - - ret = dev->transport->map_task_SG(task); - if (ret < 0) - return ret; - } - /* * For WRITEs, let the fabric know its buffer is ready.. 
* This WRITE struct se_cmd (and all of its associated struct se_task's) diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 740957655a64..54af39d8e3fb 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -239,9 +239,13 @@ struct se_subsystem_api { */ int (*cdb_none)(struct se_task *); /* - * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB + * For SCF_SCSI_DATA_SG_IO_CDB */ - int (*map_task_SG)(struct se_task *); + int (*map_data_SG)(struct se_task *); + /* + * For SCF_SCSI_CONTROL_SG_IO_CDB + */ + int (*map_control_SG)(struct se_task *); /* * attach_hba(): */ -- cgit v1.2.3 From 5de619a31d9cb051d1f818e661af4e54def82316 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 17 Jul 2011 02:57:58 -0700 Subject: target: Update QUEUE ALGORITHM MODIFIER control page default This patch adds the default 'Unrestricted reordering allowed' for SCSI control mode page QUEUE ALGORITHM MODIFIER on a per se_device basis in target_modesense_control() following spc4r23. This includes a new emulate_rest_reord configfs attribute that currently (only) accepts zero to signal 'Unrestricted reordering allowed' in control mode page usage by the backend target device. Reported-by: Roland Dreier Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_cdb.c | 29 +++++++++++++++++++++++++++++ drivers/target/target_core_configfs.c | 4 ++++ drivers/target/target_core_device.c | 15 ++++++++++++++- include/target/target_core_base.h | 1 + include/target/target_core_device.h | 1 + include/target/target_core_transport.h | 2 ++ 6 files changed, 51 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 982830023661..145739bcf752 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -774,6 +774,35 @@ target_modesense_control(struct se_device *dev, unsigned char *p) p[0] = 0x0a; p[1] = 0x0a; p[2] = 2; + /* + * From spc4r23, 7.4.7 Control mode page + * + * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies + * restrictions on the algorithm used for reordering commands + * having the SIMPLE task attribute (see SAM-4). + * + * Table 368 -- QUEUE ALGORITHM MODIFIER field + * Code Description + * 0h Restricted reordering + * 1h Unrestricted reordering allowed + * 2h to 7h Reserved + * 8h to Fh Vendor specific + * + * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that + * the device server shall order the processing sequence of commands + * having the SIMPLE task attribute such that data integrity is maintained + * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol + * requests is halted at any time, the final value of all data observable + * on the medium shall be the same as if all the commands had been processed + * with the ORDERED task attribute). + * + * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the + * device server may reorder the processing sequence of commands having the + * SIMPLE task attribute in any manner. Any data integrity exposures related to + * command sequence order shall be explicitly handled by the application client + * through the selection of appropriate ommands and task attributes. + */ + p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ?
0x00 : 0x10; /* * From spc4r17, section 7.4.6 Control mode Page * diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index e56c39daeec6..a92176d2e187 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -701,6 +701,9 @@ SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR); DEF_DEV_ATTRIB(is_nonrot); SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR); +DEF_DEV_ATTRIB(emulate_rest_reord); +SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); + DEF_DEV_ATTRIB_RO(hw_block_size); SE_DEV_ATTR_RO(hw_block_size); @@ -750,6 +753,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = { &target_core_dev_attrib_emulate_tpws.attr, &target_core_dev_attrib_enforce_pr_isids.attr, &target_core_dev_attrib_is_nonrot.attr, + &target_core_dev_attrib_emulate_rest_reord.attr, &target_core_dev_attrib_hw_block_size.attr, &target_core_dev_attrib_block_size.attr, &target_core_dev_attrib_hw_max_sectors.attr, diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 81860ddc7cc4..b38b6c993e65 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -857,6 +857,7 @@ void se_dev_set_default_attribs( dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA; dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT; + dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; /* * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK * iblock_create_virtdevice() from struct queue_limits values @@ -1128,11 +1129,23 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag) return -EINVAL; } dev->se_sub_dev->se_dev_attrib.is_nonrot = flag; - printk(KERN_INFO "dev[%p]: SE Device is_nonrot bit: %d\n", + pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n", dev, flag); return 0; } +int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag) +{ + if (flag != 0) { + printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted" + " reordering not implemented\n", dev); + return -ENOSYS; + } + dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag; + pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag); + return 0; +} + /* * Note, this can only be called on unexported SE Device Object. 
*/ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 8204c76ca12b..86ca35539b94 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -662,6 +662,7 @@ struct se_dev_attrib { int emulate_alua; int enforce_pr_isids; int is_nonrot; + int emulate_rest_reord; u32 hw_block_size; u32 block_size; u32 hw_max_sectors; diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h index f3b6ae655454..46571912086c 100644 --- a/include/target/target_core_device.h +++ b/include/target/target_core_device.h @@ -40,6 +40,7 @@ extern int se_dev_set_emulate_tpu(struct se_device *, int); extern int se_dev_set_emulate_tpws(struct se_device *, int); extern int se_dev_set_enforce_pr_isids(struct se_device *, int); extern int se_dev_set_is_nonrot(struct se_device *, int); +extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int); extern int se_dev_set_queue_depth(struct se_device *, u32); extern int se_dev_set_max_sectors(struct se_device *, u32); extern int se_dev_set_optimal_sectors(struct se_device *, u32); diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 54af39d8e3fb..f41b07c7401e 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -103,6 +103,8 @@ #define DA_STATUS_MAX_SECTORS_MAX 8192 /* By default don't report non-rotating (solid state) medium */ #define DA_IS_NONROT 0 +/* Queue Algorithm Modifier default for restricted reordering in control mode page */ +#define DA_EMULATE_REST_REORD 0 #define SE_MODE_PAGE_BUF 512 -- cgit v1.2.3 From 11650b859681e03fdbf26277fcfc5f1f62186703 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 18 Jul 2011 22:26:40 -0700 Subject: target: remove custom hex2bin() implementation This patch drops transport_asciihex_to_binaryhex() in favor of proper hex2bin usage from include/linux/kernel.h:hex2bin() Signed-off-by: Andy Shevchenko Signed-off-by: Nicholas Bellinger --- drivers/target/target_core_cdb.c | 16 ++++------------ drivers/target/target_core_fabric_lib.c | 17 +++++++---------- drivers/target/target_core_transport.c | 28 ---------------------------- include/target/target_core_transport.h | 1 - 4 files changed, 11 insertions(+), 51 deletions(-) (limited to 'include') diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 27e6a2e5e84a..8ae09a1bdf74 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -23,6 +23,7 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ +#include <linux/kernel.h> #include @@ -162,11 +163,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) struct t10_alua_lu_gp_member *lu_gp_mem; struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem; - unsigned char binary, binary_new; unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0]; u32 prod_len; u32 unit_serial_len, off = 0; - int i; u16 len = 0, id_len; off = 4; @@ -215,16 +214,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf) * VENDOR_SPECIFIC_IDENTIFIER and * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION */ - binary = transport_asciihex_to_binaryhex( - &dev->se_sub_dev->t10_wwn.unit_serial[0]); - buf[off++] |= (binary & 0xf0) >> 4; - for (i = 0; i < 24; i += 2) { - binary_new = transport_asciihex_to_binaryhex( - &dev->se_sub_dev->t10_wwn.unit_serial[i+2]); - buf[off] = (binary & 0x0f) << 4; - buf[off++] |= (binary_new & 0xf0) >> 4; - binary = binary_new; - } + buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); + hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); + len = 20; off = (len + 4); diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index a29968806cfc..c4ea3a9a555b 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -25,6 +25,7 @@ * ******************************************************************************/ +#include <linux/kernel.h> #include #include #include @@ -61,9 +62,8 @@ u32 sas_get_pr_transport_id( int *format_code, unsigned char *buf) { - unsigned char binary, *ptr; - int i; - u32 off = 4; + unsigned char *ptr; + /* * Set PROTOCOL IDENTIFIER to 6h for SAS */ @@ -74,10 +74,8 @@ u32 sas_get_pr_transport_id( */ ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */ - for (i = 0; i < 16; i += 2) { - binary = transport_asciihex_to_binaryhex(&ptr[i]); - buf[off++] = binary; - } + hex2bin(&buf[4], ptr, 8); + /* * The SAS Transport ID is a hardcoded 24-byte length */ @@ -157,7 +155,7 @@ u32 fc_get_pr_transport_id( int *format_code, unsigned char *buf) { - unsigned char binary, *ptr; + unsigned char *ptr; int i; u32 off = 8; /* @@ -176,8 +174,7 @@ u32 fc_get_pr_transport_id( i++; continue; } - binary = transport_asciihex_to_binaryhex(&ptr[i]); - buf[off++] = binary; + hex2bin(&buf[off++], &ptr[i], 1); i += 2; } /* diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 007cfc164f5e..46352d658e35 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2659,34 +2659,6 @@ static inline u32 transport_get_size( return dev->se_sub_dev->se_dev_attrib.block_size * sectors; } -unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]) -{ - unsigned char result = 0; - /* - * MSB - */ - if ((val[0] >= 'a') && (val[0] <= 'f')) - result = ((val[0] - 'a' + 10) & 0xf) << 4; - else - if ((val[0] >= 'A') && (val[0] <= 'F')) - result = ((val[0] - 'A' + 10) & 0xf) << 4; - else /* digit */ - result = ((val[0] - '0') & 0xf) << 4; - /* - * LSB - */ - if ((val[1] >= 'a') && (val[1] <= 'f')) - result |= ((val[1] - 'a' + 10) & 0xf); - else - if ((val[1] >= 'A') && (val[1] <= 'F')) - result |= ((val[1] - 'A' + 10) & 0xf); - else /* digit */ - result |= ((val[1] - '0') & 0xf); - - return result; -} -EXPORT_SYMBOL(transport_asciihex_to_binaryhex); - static void transport_xor_callback(struct se_cmd *cmd) { unsigned char *buf, *addr; diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 
f41b07c7401e..46aae4f94ede 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h @@ -179,7 +179,6 @@ extern void transport_new_cmd_failure(struct se_cmd *); extern int transport_generic_handle_tmr(struct se_cmd *); extern void transport_generic_free_cmd_intr(struct se_cmd *); extern void __transport_stop_task_timer(struct se_task *, unsigned long *); -extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]); extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32, struct scatterlist *, u32); extern int transport_clear_lun_from_sessions(struct se_lun *); -- cgit v1.2.3 From fa4951595648c14754621c99a07c47c9b9dcf05b Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 22 Jul 2011 08:24:22 +0000 Subject: target: Bump version to v4.1.0-rc1-ml This patch bumps the target core version to v4.1.0-rc1 now that we are in sync with upstream lio-core-2.6.git/master Signed-off-by: Nicholas Bellinger --- include/target/target_core_base.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 86ca35539b94..27040653005e 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -9,7 +9,7 @@ #include #include -#define TARGET_CORE_MOD_VERSION "v4.0.0-rc7-ml" +#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml" #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) /* Used by transport_generic_allocate_iovecs() */ -- cgit v1.2.3
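Illustrative aside to close the series, not part of any patch above: a userspace model of the hex_to_bin()/hex2bin() helpers from include/linux/kernel.h that the hex2bin conversion patch switches to. This is a sketch of the documented behavior under the assumption that hex2bin() of this vintage returns void and hex_to_bin() returns -1 on a non-hex digit; the _model names and the sample WWN-style string are invented for the demo, not kernel code.

#include <stddef.h>
#include <stdio.h>

/* Model of hex_to_bin(): one ASCII hex digit -> 0..15, or -1 if invalid. */
static int hex_to_bin_model(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}

/* Model of hex2bin(): pack count pairs of hex digits into count bytes. */
static void hex2bin_model(unsigned char *dst, const char *src, size_t count)
{
	while (count--) {
		*dst = (unsigned char)(hex_to_bin_model(*src++) << 4);
		*dst++ |= (unsigned char)hex_to_bin_model(*src++);
	}
}

int main(void)
{
	/* Invented NAA-style hex string, shaped like the initiatorname
	 * suffix that sas_get_pr_transport_id() consumes. */
	unsigned char buf[8] = { 0 };
	size_t i;

	hex2bin_model(buf, "5001438001234567", 8);
	for (i = 0; i < 8; i++)
		printf("%02x", buf[i]);
	printf("\n");
	return 0;
}

The high-nibble-first packing is why target_emulate_evpd_83() above can replace its hand-rolled conversion loop with one hex_to_bin() call for the leading nibble plus one hex2bin() call for the remaining twelve bytes.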