Diffstat (limited to 'drivers/target/target_core_device.c')
-rw-r--r--  drivers/target/target_core_device.c | 73
1 files changed, 35 insertions, 38 deletions
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba5edec2c5f8..edbcabbf85f7 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -42,13 +42,11 @@
 #include <scsi/scsi_device.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
@@ -104,7 +102,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		se_cmd->se_lun = deve->se_lun;
 		se_cmd->pr_res_key = deve->pr_res_key;
 		se_cmd->orig_fe_lun = unpacked_lun;
-		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 	}
 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +134,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
 		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
 		se_cmd->orig_fe_lun = 0;
-		se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
 		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
 	}
 	/*
@@ -200,7 +196,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
 		se_lun = deve->se_lun;
 		se_cmd->pr_res_key = deve->pr_res_key;
 		se_cmd->orig_fe_lun = unpacked_lun;
-		se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
 	}
 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
@@ -325,11 +320,12 @@ int core_free_device_list_for_node(
 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
 {
 	struct se_dev_entry *deve;
+	unsigned long flags;
 
-	spin_lock_irq(&se_nacl->device_list_lock);
+	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
 	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
 	deve->deve_cmds--;
-	spin_unlock_irq(&se_nacl->device_list_lock);
+	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
 }
 
 void core_update_device_list_access(
@@ -661,7 +657,7 @@ int target_report_luns(struct se_task *se_task)
 	unsigned char *buf;
 	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
-	buf = transport_kmap_first_data_page(se_cmd);
+	buf = (unsigned char *) transport_kmap_data_sg(se_cmd);
 
 	/*
 	 * If no struct se_session pointer is present, this struct se_cmd is
@@ -699,7 +695,7 @@ int target_report_luns(struct se_task *se_task)
 	 * See SPC3 r07, page 159.
 	 */
 done:
-	transport_kunmap_first_data_page(se_cmd);
+	transport_kunmap_data_sg(se_cmd);
 	lun_count *= 8;
 	buf[0] = ((lun_count >> 24) & 0xff);
 	buf[1] = ((lun_count >> 16) & 0xff);
@@ -708,7 +704,7 @@ done:
 
 	se_task->task_scsi_status = GOOD;
 	transport_complete_task(se_task, 1);
-	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+	return 0;
 }
 
 /*	se_release_device_for_hba():
@@ -957,8 +953,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 		return -EINVAL;
 	}
 
-	pr_err("dpo_emulated not supported\n");
-	return -EINVAL;
+	if (flag) {
+		pr_err("dpo_emulated not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +968,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 		return -EINVAL;
 	}
 
-	if (dev->transport->fua_write_emulated == 0) {
+	if (flag && dev->transport->fua_write_emulated == 0) {
 		pr_err("fua_write_emulated not supported\n");
 		return -EINVAL;
 	}
@@ -985,8 +985,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 		return -EINVAL;
 	}
 
-	pr_err("ua read emulated not supported\n");
-	return -EINVAL;
+	if (flag) {
+		pr_err("ua read emulated not supported\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +999,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
-	if (dev->transport->write_cache_emulated == 0) {
+	if (flag && dev->transport->write_cache_emulated == 0) {
 		pr_err("write_cache_emulated not supported\n");
 		return -EINVAL;
 	}
@@ -1056,7 +1060,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
 	 * We expect this value to be non-zero when generic Block Layer
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
-	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
 		pr_err("Generic Block Discard not supported\n");
 		return -ENOSYS;
 	}
@@ -1077,7 +1081,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
 	 * We expect this value to be non-zero when generic Block Layer
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
-	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
 		pr_err("Generic Block Discard not supported\n");
 		return -ENOSYS;
 	}
@@ -1129,8 +1133,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
  */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 {
-	u32 orig_queue_depth = dev->queue_depth;
-
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
 		pr_err("dev[%p]: Unable to change SE Device TCQ while"
 			" dev_export_obj: %d count exists\n", dev,
@@ -1164,11 +1166,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 	}
 
 	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
-	if (queue_depth > orig_queue_depth)
-		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
-	else if (queue_depth < orig_queue_depth)
-		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
-
 	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
 			dev, queue_depth);
 	return 0;
@@ -1298,24 +1295,26 @@ struct se_lun *core_dev_add_lun(
 {
 	struct se_lun *lun_p;
 	u32 lun_access = 0;
+	int rc;
 
 	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
 		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
 			atomic_read(&dev->dev_access_obj.obj_access_count));
-		return NULL;
+		return ERR_PTR(-EACCES);
 	}
 
 	lun_p = core_tpg_pre_addlun(tpg, lun);
-	if ((IS_ERR(lun_p)) || !lun_p)
-		return NULL;
+	if (IS_ERR(lun_p))
+		return lun_p;
 
 	if (dev->dev_flags & DF_READ_ONLY)
 		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
 	else
 		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
 
-	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
-		return NULL;
+	rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
+	if (rc < 0)
+		return ERR_PTR(rc);
 
 	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
 		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -1352,11 +1351,10 @@ int core_dev_del_lun(
 	u32 unpacked_lun)
 {
 	struct se_lun *lun;
-	int ret = 0;
 
-	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
-	if (!lun)
-		return ret;
+	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
+	if (IS_ERR(lun))
+		return PTR_ERR(lun);
 
 	core_tpg_post_dellun(tpg, lun);
 
@@ -1587,7 +1585,6 @@ int core_dev_setup_virtual_lun0(void)
 		ret = -ENOMEM;
 		goto out;
 	}
-	INIT_LIST_HEAD(&se_dev->se_dev_node);
 	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
 	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
 	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
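
Note on the error-handling change above: core_dev_add_lun() and core_dev_del_lun() stop signalling failure through NULL (or an out-parameter) and instead return an encoded errno via ERR_PTR(), which callers unwrap with IS_ERR()/PTR_ERR(). The sketch below is a minimal userspace illustration of that idiom, not code from this patch; the ERR_PTR/IS_ERR/PTR_ERR helpers are re-declared here only so the example compiles outside the kernel (they mirror <linux/err.h>), and demo_add_lun() is a hypothetical stand-in for the patched function.

/* Standalone sketch of the ERR_PTR/IS_ERR/PTR_ERR error-return idiom. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* same bound the kernel uses for errno-in-pointer values */

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Pointer values in the top 4095 bytes of the address space encode errnos. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct se_lun { int id; };

/* Hypothetical stand-in for core_dev_add_lun(): returns a valid pointer on
 * success or an encoded errno on failure, never a bare NULL. */
static struct se_lun *demo_add_lun(int busy)
{
	static struct se_lun lun = { .id = 0 };

	if (busy)
		return ERR_PTR(-EACCES);	/* mirrors the patched failure path */
	return &lun;
}

int main(void)
{
	struct se_lun *lun = demo_add_lun(1);

	if (IS_ERR(lun)) {
		/* The caller recovers the specific errno instead of guessing from NULL. */
		printf("add_lun failed: %ld\n", PTR_ERR(lun));
		return 1;
	}
	printf("added lun %d\n", lun->id);
	return 0;
}

The practical gain visible in the diff is that a caller now sees -EACCES when the device is still referenced, or the exact error propagated from core_tpg_post_addlun(), rather than a single undifferentiated NULL.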