author     Linus Torvalds <torvalds@linux-foundation.org>  2021-09-03 01:09:46 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-09-03 01:09:46 +0300
commit     a9c9a6f741cdaa2fa9ba24a790db8d07295761e3 (patch)
tree       222aaa35ed4e66c2027845213251e2a3f491b5ba /drivers/target
parent     23852bec534a1633dc08f4df88b8493ae99953a9 (diff)
parent     9b5ac8ab4e8bf5636d1d425aee68ddf45af12057 (diff)
download   linux-a9c9a6f741cdaa2fa9ba24a790db8d07295761e3.tar.xz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This series consists of the usual driver updates (ufs, qla2xxx,
target, smartpqi, lpfc, mpt3sas).
The core change causing the most churn was replacing the command
request field request with a macro, allowing us to offset map to it
and remove the redundant field; the same was also done for the tag
field.
The most impactful change is the final removal of scsi_ioctl, which
has been deprecated for over a decade"
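
The "request field replaced with a macro" change mentioned in the pull message shows up in the drivers/target hunks below as scsi_cmd_to_rq(sc)->tag replacing sc->request->tag. The following is a small, self-contained sketch of the offset-mapping idea, assuming the command structure is allocated immediately after its request in one block; the struct names mirror the kernel ones but are simplified stand-ins, not the real definitions:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures, for illustration only. */
struct request {
	int tag;
};

struct scsi_cmnd {
	unsigned char cmnd[16];
};

/*
 * The command lives directly after its request in a single allocation, so
 * the request can be recovered by pointer arithmetic instead of being kept
 * in a dedicated back-pointer field inside the command.
 */
static struct request *cmd_to_rq(struct scsi_cmnd *cmd)
{
	return (struct request *)cmd - 1;
}

static struct scsi_cmnd *rq_to_cmd(struct request *rq)
{
	return (struct scsi_cmnd *)(rq + 1);
}

int main(void)
{
	/* One contiguous allocation: the request followed by the command. */
	struct request *rq = calloc(1, sizeof(struct request) +
				       sizeof(struct scsi_cmnd));
	if (!rq)
		return 1;

	rq->tag = 42;

	/* A driver holding only the command can still reach the tag. */
	printf("tag via offset map: %d\n", cmd_to_rq(rq_to_cmd(rq))->tag);

	free(rq);
	return 0;
}

Because the request is always reachable at a fixed offset from the command, the redundant pointer field (and the duplicated tag) can be removed, which is what produced most of the churn in the driver updates.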
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (293 commits)
scsi: ufs: Fix ufshcd_request_sense_async() for Samsung KLUFG8RHDA-B2D1
scsi: ufs: ufs-exynos: Fix static checker warning
scsi: mpt3sas: Use the proper SCSI midlayer interfaces for PI
scsi: lpfc: Use the proper SCSI midlayer interfaces for PI
scsi: lpfc: Copyright updates for 14.0.0.1 patches
scsi: lpfc: Update lpfc version to 14.0.0.1
scsi: lpfc: Add bsg support for retrieving adapter cmf data
scsi: lpfc: Add cmf_info sysfs entry
scsi: lpfc: Add debugfs support for cm framework buffers
scsi: lpfc: Add support for maintaining the cm statistics buffer
scsi: lpfc: Add rx monitoring statistics
scsi: lpfc: Add support for the CM framework
scsi: lpfc: Add cmfsync WQE support
scsi: lpfc: Add support for cm enablement buffer
scsi: lpfc: Add cm statistics buffer support
scsi: lpfc: Add EDC ELS support
scsi: lpfc: Expand FPIN and RDF receive logging
scsi: lpfc: Add MIB feature enablement support
scsi: lpfc: Add SET_HOST_DATA mbox cmd to pass date/time info to firmware
scsi: fc: Add EDC ELS definition
...
Diffstat (limited to 'drivers/target')
 drivers/target/Kconfig                    |   2
 drivers/target/iscsi/cxgbit/cxgbit_ddp.c  |   2
 drivers/target/loopback/tcm_loop.c        |   8
 drivers/target/sbp/sbp_target.c           |   4
 drivers/target/target_core_alua.c         |  94
 drivers/target/target_core_iblock.c       |   2
 drivers/target/target_core_pscsi.c        |  18
 drivers/target/target_core_transport.c    |  48
 drivers/target/target_core_user.c         | 150
 drivers/target/target_core_xcopy.c        |  26
 10 files changed, 243 insertions(+), 111 deletions(-)
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index c163b14774d7..72171ea3dd53 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -5,7 +5,7 @@ menuconfig TARGET_CORE
 	depends on BLOCK
 	select CONFIGFS_FS
 	select CRC_T10DIF
-	select BLK_SCSI_REQUEST
+	select SCSI_COMMON
 	select SGL_ALLOC
 	default n
 	help
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
index b044999ad002..072afd070f3e 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -234,7 +234,7 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	struct cxgbit_device *cdev = csk->com.cdev;
 	struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
 	struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
-	int ret = -EINVAL;
+	int ret;

 	if ((!ccmd->setup_ddp) ||
 	    (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index cbb2118fb35e..52db28d868d5 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -183,7 +183,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)

 	memset(tl_cmd, 0, sizeof(*tl_cmd));
 	tl_cmd->sc = sc;
-	tl_cmd->sc_cmd_tag = sc->request->tag;
+	tl_cmd->sc_cmd_tag = scsi_cmd_to_rq(sc)->tag;

 	tcm_loop_target_queue_cmd(tl_cmd);
 	return 0;
@@ -241,7 +241,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 {
 	struct tcm_loop_hba *tl_hba;
 	struct tcm_loop_tpg *tl_tpg;
-	int ret = FAILED;
+	int ret;

 	/*
 	 * Locate the tcm_loop_hba_t pointer
@@ -249,7 +249,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
 	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
 	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
-				 sc->request->tag, TMR_ABORT_TASK);
+				 scsi_cmd_to_rq(sc)->tag, TMR_ABORT_TASK);

 	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 }
@@ -261,7 +261,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 {
 	struct tcm_loop_hba *tl_hba;
 	struct tcm_loop_tpg *tl_tpg;
-	int ret = FAILED;
+	int ret;

 	/*
 	 * Locate the tcm_loop_hba_t pointer
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 4d3ceee23622..b9f9fb5d7e63 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1389,8 +1389,8 @@ static void sbp_sense_mangle(struct sbp_target_request *req)
 		(sense[0] & 0x80) |		/* valid */
 		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
 		(sense[2] & 0x0f);		/* sense_key */
-	status[2] = se_cmd->scsi_asc;		/* sense_code */
-	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */
+	status[2] = 0;				/* XXX sense_code */
+	status[3] = 0;				/* XXX sense_qualifier */

 	/* information */
 	status[4] = sense[3];
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 3bb921345bce..cb1de1ecaaa6 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -428,22 +428,6 @@ out:
 	return rc;
 }

-static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
-{
-	/*
-	 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
-	 * The ALUA additional sense code qualifier (ASCQ) is determined
-	 * by the ALUA primary or secondary access state..
-	 */
-	pr_debug("[%s]: ALUA TG Port not available, "
-		"SenseKey: NOT_READY, ASC/ASCQ: "
-		"0x04/0x%02x\n",
-		cmd->se_tfo->fabric_name, alua_ascq);
-
-	cmd->scsi_asc = 0x04;
-	cmd->scsi_ascq = alua_ascq;
-}
-
 static inline void core_alua_state_nonoptimized(
 	struct se_cmd *cmd,
 	unsigned char *cdb,
@@ -458,9 +442,9 @@ static inline void core_alua_state_nonoptimized(
 	cmd->alua_nonop_delay = nonop_delay_msecs;
 }

-static inline int core_alua_state_lba_dependent(
+static inline sense_reason_t core_alua_state_lba_dependent(
 	struct se_cmd *cmd,
-	struct t10_alua_tg_pt_gp *tg_pt_gp)
+	u16 tg_pt_gp_id)
 {
 	struct se_device *dev = cmd->se_dev;
 	u64 segment_size, segment_mult, sectors, lba;
@@ -506,23 +490,19 @@ static inline int core_alua_state_lba_dependent(
 	}
 	if (!cur_map) {
 		spin_unlock(&dev->t10_alua.lba_map_lock);
-		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-		return 1;
+		return TCM_ALUA_TG_PT_UNAVAILABLE;
 	}
 	list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
 			    lba_map_mem_list) {
-		if (map_mem->lba_map_mem_alua_pg_id !=
-		    tg_pt_gp->tg_pt_gp_id)
+		if (map_mem->lba_map_mem_alua_pg_id != tg_pt_gp_id)
 			continue;
 		switch(map_mem->lba_map_mem_alua_state) {
 		case ALUA_ACCESS_STATE_STANDBY:
 			spin_unlock(&dev->t10_alua.lba_map_lock);
-			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-			return 1;
+			return TCM_ALUA_TG_PT_STANDBY;
 		case ALUA_ACCESS_STATE_UNAVAILABLE:
 			spin_unlock(&dev->t10_alua.lba_map_lock);
-			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-			return 1;
+			return TCM_ALUA_TG_PT_UNAVAILABLE;
 		default:
 			break;
 		}
@@ -532,7 +512,7 @@ static inline int core_alua_state_lba_dependent(
 	return 0;
 }

-static inline int core_alua_state_standby(
+static inline sense_reason_t core_alua_state_standby(
 	struct se_cmd *cmd,
 	unsigned char *cdb)
 {
@@ -556,24 +536,21 @@ static inline int core_alua_state_standby(
 		case SAI_READ_CAPACITY_16:
 			return 0;
 		default:
-			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-			return 1;
+			return TCM_ALUA_TG_PT_STANDBY;
 		}
 	case MAINTENANCE_IN:
 		switch (cdb[1] & 0x1f) {
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-			return 1;
+			return TCM_ALUA_TG_PT_STANDBY;
 		}
 	case MAINTENANCE_OUT:
 		switch (cdb[1]) {
 		case MO_SET_TARGET_PGS:
 			return 0;
 		default:
-			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-			return 1;
+			return TCM_ALUA_TG_PT_STANDBY;
 		}
 	case REQUEST_SENSE:
 	case PERSISTENT_RESERVE_IN:
@@ -582,14 +559,13 @@ static inline int core_alua_state_standby(
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
-		return 1;
+		return TCM_ALUA_TG_PT_STANDBY;
 	}

 	return 0;
 }

-static inline int core_alua_state_unavailable(
+static inline sense_reason_t core_alua_state_unavailable(
 	struct se_cmd *cmd,
 	unsigned char *cdb)
 {
@@ -606,30 +582,27 @@ static inline int core_alua_state_unavailable(
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-			return 1;
+			return TCM_ALUA_TG_PT_UNAVAILABLE;
 		}
 	case MAINTENANCE_OUT:
 		switch (cdb[1]) {
 		case MO_SET_TARGET_PGS:
 			return 0;
 		default:
-			set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-			return 1;
+			return TCM_ALUA_TG_PT_UNAVAILABLE;
 		}
 	case REQUEST_SENSE:
 	case READ_BUFFER:
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
-		return 1;
+		return TCM_ALUA_TG_PT_UNAVAILABLE;
 	}

 	return 0;
 }

-static inline int core_alua_state_transition(
+static inline sense_reason_t core_alua_state_transition(
 	struct se_cmd *cmd,
 	unsigned char *cdb)
 {
@@ -646,16 +619,14 @@ static inline int core_alua_state_transition(
 		case MI_REPORT_TARGET_PGS:
 			return 0;
 		default:
-			set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
-			return 1;
+			return TCM_ALUA_STATE_TRANSITION;
 		}
 	case REQUEST_SENSE:
 	case READ_BUFFER:
 	case WRITE_BUFFER:
 		return 0;
 	default:
-		set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
-		return 1;
+		return TCM_ALUA_STATE_TRANSITION;
 	}

 	return 0;
@@ -674,6 +645,8 @@ target_alua_state_check(struct se_cmd *cmd)
 	struct se_lun *lun = cmd->se_lun;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 	int out_alua_state, nonop_delay_msecs;
+	u16 tg_pt_gp_id;
+	sense_reason_t rc = TCM_NO_SENSE;

 	if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
 		return 0;
@@ -687,8 +660,7 @@ target_alua_state_check(struct se_cmd *cmd)
 	if (atomic_read(&lun->lun_tg_pt_secondary_offline)) {
 		pr_debug("ALUA: Got secondary offline status for local"
 				" target port\n");
-		set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
-		return TCM_CHECK_CONDITION_NOT_READY;
+		return TCM_ALUA_OFFLINE;
 	}

 	if (!lun->lun_tg_pt_gp)
@@ -698,8 +670,8 @@ target_alua_state_check(struct se_cmd *cmd)
 	tg_pt_gp = lun->lun_tg_pt_gp;
 	out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
 	nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
+	tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;

-	// XXX: keeps using tg_pt_gp witout reference after unlock
 	spin_unlock(&lun->lun_tg_pt_gp_lock);
 	/*
 	 * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
@@ -715,20 +687,16 @@ target_alua_state_check(struct se_cmd *cmd)
 		core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
 		break;
 	case ALUA_ACCESS_STATE_STANDBY:
-		if (core_alua_state_standby(cmd, cdb))
-			return TCM_CHECK_CONDITION_NOT_READY;
+		rc = core_alua_state_standby(cmd, cdb);
 		break;
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
-		if (core_alua_state_unavailable(cmd, cdb))
-			return TCM_CHECK_CONDITION_NOT_READY;
+		rc = core_alua_state_unavailable(cmd, cdb);
 		break;
 	case ALUA_ACCESS_STATE_TRANSITION:
-		if (core_alua_state_transition(cmd, cdb))
-			return TCM_CHECK_CONDITION_NOT_READY;
+		rc = core_alua_state_transition(cmd, cdb);
 		break;
 	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
-		if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
-			return TCM_CHECK_CONDITION_NOT_READY;
+		rc = core_alua_state_lba_dependent(cmd, tg_pt_gp_id);
 		break;
 	/*
 	 * OFFLINE is a secondary ALUA target port group access state, that is
@@ -738,10 +706,16 @@ target_alua_state_check(struct se_cmd *cmd)
 	default:
 		pr_err("Unknown ALUA access state: 0x%02x\n",
 				out_alua_state);
-		return TCM_INVALID_CDB_FIELD;
+		rc = TCM_INVALID_CDB_FIELD;
 	}

-	return 0;
+	if (rc && rc != TCM_INVALID_CDB_FIELD) {
+		pr_debug("[%s]: ALUA TG Port not available, "
+			 "SenseKey: NOT_READY, ASC/rc: 0x04/%d\n",
+			 cmd->se_tfo->fabric_name, rc);
+	}
+
+	return rc;
 }

 /*
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 44d9d028f716..4069a1edcfa3 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -83,7 +83,7 @@ static int iblock_configure_device(struct se_device *dev)
 	struct blk_integrity *bi;
 	fmode_t mode;
 	unsigned int max_write_zeroes_sectors;
-	int ret = -ENOMEM;
+	int ret;

 	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
 		pr_err("Missing udev_path= parameters for IBLOCK\n");
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 2629d2ef3970..75ef52f008ff 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -620,17 +620,17 @@ static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
 			buf = transport_kmap_data_sg(cmd);
 			if (!buf) {
 				; /* XXX: TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE */
-			}
-
-			if (cdb[0] == MODE_SENSE_10) {
-				if (!(buf[3] & 0x80))
-					buf[3] |= 0x80;
 			} else {
-				if (!(buf[2] & 0x80))
-					buf[2] |= 0x80;
-			}
+				if (cdb[0] == MODE_SENSE_10) {
+					if (!(buf[3] & 0x80))
+						buf[3] |= 0x80;
+				} else {
+					if (!(buf[2] & 0x80))
+						buf[2] |= 0x80;
+				}

-			transport_kunmap_data_sg(cmd);
+				transport_kunmap_data_sg(cmd);
+			}
 		}
 	}
 after_mode_sense:
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 26ceabe34de5..14c6f2bb1b01 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -736,8 +736,7 @@ static void target_complete_failure_work(struct work_struct *work)
 {
 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

-	transport_generic_request_failure(cmd,
-			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+	transport_generic_request_failure(cmd, cmd->sense_reason);
 }

 /*
@@ -855,7 +854,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 }

 /* May be called from interrupt context so must not sleep. */
-void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
+				    sense_reason_t sense_reason)
 {
 	struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
 	int success, cpu;
@@ -865,6 +865,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 		return;

 	cmd->scsi_status = scsi_status;
+	cmd->sense_reason = sense_reason;

 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	switch (cmd->scsi_status) {
@@ -893,6 +894,14 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)

 	queue_work_on(cpu, target_completion_wq, &cmd->work);
 }
+EXPORT_SYMBOL(target_complete_cmd_with_sense);
+
+void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+{
+	target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ?
+			      TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE :
+			      TCM_NO_SENSE);
+}
 EXPORT_SYMBOL(target_complete_cmd);

 void target_set_cmd_data_length(struct se_cmd *cmd, int length)
@@ -2003,7 +2012,6 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	case TCM_ADDRESS_OUT_OF_RANGE:
 	case TCM_CHECK_CONDITION_ABORT_CMD:
 	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
-	case TCM_CHECK_CONDITION_NOT_READY:
 	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
 	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
 	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
@@ -2013,6 +2021,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 	case TCM_TOO_MANY_SEGMENT_DESCS:
 	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
 	case TCM_INVALID_FIELD_IN_COMMAND_IU:
+	case TCM_ALUA_TG_PT_STANDBY:
+	case TCM_ALUA_TG_PT_UNAVAILABLE:
+	case TCM_ALUA_STATE_TRANSITION:
+	case TCM_ALUA_OFFLINE:
 		break;
 	case TCM_OUT_OF_RESOURCES:
 		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
@@ -3277,9 +3289,6 @@ static const struct sense_detail sense_detail_table[] = {
 	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
 		.key = UNIT_ATTENTION,
 	},
-	[TCM_CHECK_CONDITION_NOT_READY] = {
-		.key = NOT_READY,
-	},
 	[TCM_MISCOMPARE_VERIFY] = {
 		.key = MISCOMPARE,
 		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
@@ -3340,6 +3349,26 @@ static const struct sense_detail sense_detail_table[] = {
 		.asc = 0x0e,
 		.ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
 	},
+	[TCM_ALUA_TG_PT_STANDBY] = {
+		.key = NOT_READY,
+		.asc = 0x04,
+		.ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
+	},
+	[TCM_ALUA_TG_PT_UNAVAILABLE] = {
+		.key = NOT_READY,
+		.asc = 0x04,
+		.ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
+	},
+	[TCM_ALUA_STATE_TRANSITION] = {
+		.key = NOT_READY,
+		.asc = 0x04,
+		.ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
+	},
+	[TCM_ALUA_OFFLINE] = {
+		.key = NOT_READY,
+		.asc = 0x04,
+		.ascq = ASCQ_04H_ALUA_OFFLINE,
+	},
 };

 /**
@@ -3374,11 +3403,8 @@ static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
 			cmd->scsi_status = SAM_STAT_BUSY;
 			return;
 		}
-	} else if (sd->asc == 0) {
-		WARN_ON_ONCE(cmd->scsi_asc == 0);
-		asc = cmd->scsi_asc;
-		ascq = cmd->scsi_ascq;
 	} else {
+		WARN_ON_ONCE(sd->asc == 0);
 		asc = sd->asc;
 		ascq = sd->ascq;
 	}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index fbb6ffaddfbe..9f552f48084c 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -191,6 +191,7 @@ struct tcmu_cmd {
 	unsigned long deadline;

 #define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_KEEP_BUF 1
 	unsigned long flags;
 };

@@ -1315,11 +1316,13 @@ unlock:
 	mutex_unlock(&udev->cmdr_lock);
 }

-static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
+static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
+				   struct tcmu_cmd_entry *entry, bool keep_buf)
 {
 	struct se_cmd *se_cmd = cmd->se_cmd;
 	struct tcmu_dev *udev = cmd->tcmu_dev;
 	bool read_len_valid = false;
+	bool ret = true;
 	uint32_t read_len;

 	/*
@@ -1330,6 +1333,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 		WARN_ON_ONCE(se_cmd);
 		goto out;
 	}
+	if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+		pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
+		       entry->hdr.cmd_id);
+		set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
+		ret = false;
+		goto out;
+	}

 	list_del_init(&cmd->queue_entry);

@@ -1379,8 +1389,22 @@ done:
 		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

 out:
-	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
-	tcmu_free_cmd(cmd);
+	if (!keep_buf) {
+		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+		tcmu_free_cmd(cmd);
+	} else {
+		/*
+		 * Keep this command after completion, since userspace still
+		 * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
+		 * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept
+		 * a second completion later.
+		 * Userspace can free the buffer later by writing the cmd_id
+		 * to new action attribute free_kept_buf.
+		 */
+		clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+		set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
+	}
+	return ret;
 }

 static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
@@ -1432,6 +1456,7 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
 	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

 		struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
+		bool keep_buf;

 		/*
 		 * Flush max. up to end of cmd ring since current entry might
@@ -1453,7 +1478,11 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
 		}
 		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

-		cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
+		keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
+		if (keep_buf)
+			cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
+		else
+			cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
 		if (!cmd) {
 			pr_err("cmd_id %u not found, ring is broken\n",
 			       entry->hdr.cmd_id);
@@ -1461,7 +1490,8 @@ static bool tcmu_handle_completions(struct tcmu_dev *udev)
 			return false;
 		}

-		tcmu_handle_completion(cmd, entry);
+		if (!tcmu_handle_completion(cmd, entry, keep_buf))
+			break;

 		UPDATE_HEAD(udev->cmdr_last_cleaned,
 			    tcmu_hdr_get_len(entry->hdr.len_op),
@@ -1619,7 +1649,8 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)

 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 {
-	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
+	    test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
 		kmem_cache_free(tcmu_cmd_cache, cmd);
 		return 0;
 	}
@@ -1903,6 +1934,38 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
 static int tcmu_release(struct uio_info *info, struct inode *inode)
 {
 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
+	struct tcmu_cmd *cmd;
+	unsigned long i;
+	bool freed = false;
+
+	mutex_lock(&udev->cmdr_lock);
+
+	xa_for_each(&udev->commands, i, cmd) {
+		/* Cmds with KEEP_BUF set are no longer on the ring, but
+		 * userspace still holds the data buffer. If userspace closes
+		 * we implicitly free these cmds and buffers, since after new
+		 * open the (new ?) userspace cannot find the cmd in the ring
+		 * and thus never will release the buffer by writing cmd_id to
+		 * free_kept_buf action attribute.
+		 */
+		if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
+			continue;
+		pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
+			 cmd->cmd_id, udev->name);
+		freed = true;
+
+		xa_erase(&udev->commands, i);
+		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+		tcmu_free_cmd(cmd);
+	}
+	/*
+	 * We only freed data space, not ring space. Therefore we dont call
+	 * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
+	 */
+	if (freed && list_empty(&udev->tmr_queue))
+		run_qfull_queue(udev, false);
+
+	mutex_unlock(&udev->cmdr_lock);

 	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

@@ -2147,7 +2210,8 @@ static int tcmu_configure_device(struct se_device *dev)
 	mb->version = TCMU_MAILBOX_VERSION;
 	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
 		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
-		    TCMU_MAILBOX_FLAG_CAP_TMR;
+		    TCMU_MAILBOX_FLAG_CAP_TMR |
+		    TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
 	mb->cmdr_off = CMDR_OFF;
 	mb->cmdr_size = udev->cmdr_size;

@@ -2279,12 +2343,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 	mutex_lock(&udev->cmdr_lock);

 	xa_for_each(&udev->commands, i, cmd) {
-		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
-			  cmd->cmd_id, udev->name,
-			  test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
+		pr_debug("removing cmd %u on dev %s from ring %s\n",
+			  cmd->cmd_id, udev->name,
+			  test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
+			  "(is expired)" :
+			  (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
+			  "(is keep buffer)" : ""));

 		xa_erase(&udev->commands, i);
-		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
+		    !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
 			WARN_ON(!cmd->se_cmd);
 			list_del_init(&cmd->queue_entry);
 			cmd->se_cmd->priv = NULL;
@@ -2933,6 +3001,65 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
 }
 CONFIGFS_ATTR_WO(tcmu_, reset_ring);

+static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
+					size_t count)
+{
+	struct se_device *se_dev = container_of(to_config_group(item),
+						struct se_device,
+						dev_action_group);
+	struct tcmu_dev *udev = TCMU_DEV(se_dev);
+	struct tcmu_cmd *cmd;
+	u16 cmd_id;
+	int ret;
+
+	if (!target_dev_configured(&udev->se_dev)) {
+		pr_err("Device is not configured.\n");
+		return -EINVAL;
+	}
+
+	ret = kstrtou16(page, 0, &cmd_id);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&udev->cmdr_lock);
+
+	{
+		XA_STATE(xas, &udev->commands, cmd_id);
+
+		xas_lock(&xas);
+		cmd = xas_load(&xas);
+		if (!cmd) {
+			pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
+			count = -EINVAL;
+			xas_unlock(&xas);
+			goto out_unlock;
+		}
+		if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
+			pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
+			       cmd_id);
+			count = -EINVAL;
+			xas_unlock(&xas);
+			goto out_unlock;
+		}
+		xas_store(&xas, NULL);
+		xas_unlock(&xas);
+	}
+
+	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+	tcmu_free_cmd(cmd);
+	/*
+	 * We only freed data space, not ring space. Therefore we dont call
+	 * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
+	 */
+	if (list_empty(&udev->tmr_queue))
+		run_qfull_queue(udev, false);
+
+out_unlock:
+	mutex_unlock(&udev->cmdr_lock);
+	return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
+
 static struct configfs_attribute *tcmu_attrib_attrs[] = {
 	&tcmu_attr_cmd_time_out,
 	&tcmu_attr_qfull_time_out,
@@ -2951,6 +3078,7 @@ static struct configfs_attribute **tcmu_attrs;
 static struct configfs_attribute *tcmu_action_attrs[] = {
 	&tcmu_attr_block_dev,
 	&tcmu_attr_reset_ring,
+	&tcmu_attr_free_kept_buf,
 	NULL,
 };
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 0f1319336f3e..d4fe7cb2bd00 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -674,12 +674,16 @@ static void target_xcopy_do_work(struct work_struct *work)
 	unsigned int max_sectors;
 	int rc = 0;
 	unsigned short nolb, max_nolb, copied_nolb = 0;
+	sense_reason_t sense_rc;

-	if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
+	sense_rc = target_parse_xcopy_cmd(xop);
+	if (sense_rc != TCM_NO_SENSE)
 		goto err_free;

-	if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev))
+	if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) {
+		sense_rc = TCM_INVALID_PARAMETER_LIST;
 		goto err_free;
+	}

 	src_dev = xop->src_dev;
 	dst_dev = xop->dst_dev;
@@ -762,20 +766,20 @@ static void target_xcopy_do_work(struct work_struct *work)
 	return;

out:
+	/*
+	 * The XCOPY command was aborted after some data was transferred.
+	 * Terminate command with CHECK CONDITION status, with the sense key
+	 * set to COPY ABORTED.
+	 */
+	sense_rc = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
 	xcopy_pt_undepend_remotedev(xop);
 	target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);

err_free:
 	kfree(xop);
-	/*
-	 * Don't override an error scsi status if it has already been set
-	 */
-	if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
-		pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
-			" CHECK_CONDITION -> sending response\n", rc);
-		ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
-	}
-	target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
+	pr_warn_ratelimited("target_xcopy_do_work: rc: %d, sense: %u, XCOPY operation failed\n",
+			    rc, sense_rc);
+	target_complete_cmd_with_sense(ec_cmd, SAM_STAT_CHECK_CONDITION, sense_rc);
 }

 /*
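
The target_core_user.c hunks above add a "keep buffer" handshake between the TCMU kernel driver and its userspace backstore: the kernel advertises TCMU_MAILBOX_FLAG_CAP_KEEP_BUF, userspace sets TCMU_UFLAG_KEEP_BUF in a completion entry when it still needs the data buffer, and it later releases the buffer by writing the cmd_id to the new free_kept_buf action attribute (buffers kept at close time are reclaimed by tcmu_release()). Below is a hedged userspace sketch of that final release step; the configfs path and the helper itself are illustrative assumptions that depend on how the user backstore was named, not part of the patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical helper: release a data buffer that was kept after command
 * completion (TCMU_UFLAG_KEEP_BUF) by writing the cmd_id to the device's
 * free_kept_buf action attribute. The configfs path is an assumption and
 * depends on the HBA/device names used when the backstore was created.
 */
static int tcmu_free_kept_buf(const char *dev_action_dir, uint16_t cmd_id)
{
	char path[256];
	char val[16];
	int fd, len, ret = 0;

	snprintf(path, sizeof(path), "%s/free_kept_buf", dev_action_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	len = snprintf(val, sizeof(val), "%u", cmd_id);
	if (write(fd, val, len) != len)
		ret = -1;

	close(fd);
	return ret;
}

int main(void)
{
	/* Example path and cmd_id, for illustration only. */
	return tcmu_free_kept_buf(
		"/sys/kernel/config/target/core/user_0/mydev/action", 7);
}

Routing the release through configfs rather than the mmap'ed ring lets the kernel validate the cmd_id under cmdr_lock, exactly as tcmu_free_kept_buf_store() does in the diff above.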