Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 313 |
1 file changed, 158 insertions, 155 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 06c260f6cdae..0ba7a65e7c8d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -202,24 +202,17 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
 	blk_mq_requeue_request(cmd->request, true);
 }
 
-/*
- * Function:    scsi_queue_insert()
- *
- * Purpose:     Insert a command in the midlevel queue.
- *
- * Arguments:   cmd    - command that we are adding to queue.
- *              reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
+/**
+ * scsi_queue_insert - Reinsert a command in the queue.
+ * @cmd:    command that we are adding to queue.
+ * @reason: why we are inserting command to queue.
  *
- * Returns:     Nothing.
+ * We do this for one of two cases. Either the host is busy and it cannot accept
+ * any more commands for the time being, or the device returned QUEUE_FULL and
+ * can accept no more commands.
  *
- * Notes:       We do this for one of two cases. Either the host is busy
- *              and it cannot accept any more commands for the time being,
- *              or the device returned QUEUE_FULL and can accept no more
- *              commands.
- * Notes:       This could be called either from an interrupt context or a
- *              normal process context.
+ * Context: This could be called either from an interrupt context or a normal
+ * process context.
  */
 void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
@@ -301,16 +294,12 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(__scsi_execute);
 
-/*
- * Function:    scsi_init_cmd_errh()
- *
- * Purpose:     Initialize cmd fields related to error handling.
- *
- * Arguments:   cmd - command that is ready to be queued.
+/**
+ * scsi_init_cmd_errh - Initialize cmd fields related to error handling.
+ * @cmd: command that is ready to be queued.
  *
- * Notes:       This function has the job of initializing a number of
- *              fields related to error handling. Typically this will
- *              be called once for each command, as required.
+ * This function has the job of initializing a number of fields related to error
+ * handling. Typically this will be called once for each command, as required.
  */
 static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 {
@@ -496,17 +485,11 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
-/*
- * Function:    scsi_run_queue()
- *
- * Purpose:     Select a proper request queue to serve next
- *
- * Arguments:   q - last request's queue
- *
- * Returns:     Nothing
+/**
+ * scsi_run_queue - Select a proper request queue to serve next.
+ * @q: last request's queue
  *
- * Notes:       The previous command was completely finished, start
- *              a new one if possible.
+ * The previous command was completely finished, start a new one if possible.
  */
 static void scsi_run_queue(struct request_queue *q)
 {
@@ -548,7 +531,7 @@ static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
 	}
 }
 
-static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
+static void scsi_free_sgtables(struct scsi_cmnd *cmd)
 {
 	if (cmd->sdb.table.nents)
 		sg_free_table_chained(&cmd->sdb.table,
@@ -560,7 +543,7 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 {
-	scsi_mq_free_sgtables(cmd);
+	scsi_free_sgtables(cmd);
 	scsi_uninit_cmd(cmd);
 }
 
@@ -896,34 +879,27 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
 	return result;
 }
 
-/*
- * Function:    scsi_io_completion()
- *
- * Purpose:     Completion processing for block device I/O requests.
- *
- * Arguments:   cmd - command that is finished.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns:     Nothing
- *
- * Notes:       We will finish off the specified number of sectors. If we
- *              are done, the command block will be released and the queue
- *              function will be goosed. If we are not done then we have to
- *              figure out what to do next:
- *
- *              a) We can call scsi_requeue_command(). The request
- *                 will be unprepared and put back on the queue. Then
- *                 a new command will be created for it. This should
- *                 be used if we made forward progress, or if we want
- *                 to switch from READ(10) to READ(6) for example.
- *
- *              b) We can call __scsi_queue_insert(). The request will
- *                 be put back on the queue and retried using the same
- *                 command as before, possibly after a delay.
- *
- *              c) We can call scsi_end_request() with blk_stat other than
- *                 BLK_STS_OK, to fail the remainder of the request.
+/**
+ * scsi_io_completion - Completion processing for SCSI commands.
+ * @cmd: command that is finished.
+ * @good_bytes: number of processed bytes.
+ *
+ * We will finish off the specified number of sectors. If we are done, the
+ * command block will be released and the queue function will be goosed. If we
+ * are not done then we have to figure out what to do next:
+ *
+ *   a) We can call scsi_io_completion_reprep(). The request will be
+ *      unprepared and put back on the queue. Then a new command will
+ *      be created for it. This should be used if we made forward
+ *      progress, or if we want to switch from READ(10) to READ(6) for
+ *      example.
+ *
+ *   b) We can call scsi_io_completion_action(). The request will be
+ *      put back on the queue and retried using the same command as
+ *      before, possibly after a delay.
+ *
+ *   c) We can call scsi_end_request() with blk_stat other than
+ *      BLK_STS_OK, to fail the remainder of the request.
  */
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
@@ -951,8 +927,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		blk_rq_sectors(req), good_bytes));
 
 	/*
-	 * Next deal with any sectors which we were able to correctly
-	 * handle. Failed, zero length commands always need to drop down
+	 * Failed, zero length commands always need to drop down
 	 * to retry code. Fast path should return in this block.
 	 */
 	if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) {
@@ -978,56 +953,81 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 		scsi_io_completion_action(cmd, result);
 }
 
-static blk_status_t scsi_init_sgtable(struct request *req,
-		struct scsi_data_buffer *sdb)
+static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
+		struct request *rq)
 {
+	return sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
+	       !op_is_write(req_op(rq)) &&
+	       sdev->host->hostt->dma_need_drain(rq);
+}
+
+/**
+ * scsi_init_io - SCSI I/O initialization function.
+ * @cmd: command descriptor we wish to initialize
+ *
+ * Returns:
+ * * BLK_STS_OK       - on success
+ * * BLK_STS_RESOURCE - if the failure is retryable
+ * * BLK_STS_IOERR    - if the failure is fatal
+ */
+blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
+{
+	struct scsi_device *sdev = cmd->device;
+	struct request *rq = cmd->request;
+	unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
+	struct scatterlist *last_sg = NULL;
+	blk_status_t ret;
+	bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq);
 	int count;
 
+	if (WARN_ON_ONCE(!nr_segs))
+		return BLK_STS_IOERR;
+
+	/*
+	 * Make sure there is space for the drain. The driver must adjust
+	 * max_hw_segments to be prepared for this.
+	 */
+	if (need_drain)
+		nr_segs++;
+
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	if (unlikely(sg_alloc_table_chained(&sdb->table,
-			blk_rq_nr_phys_segments(req), sdb->table.sgl,
-			SCSI_INLINE_SG_CNT)))
+	if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
+			cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT)))
 		return BLK_STS_RESOURCE;
 
-	/* 
+	/*
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
 	 */
-	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
-	BUG_ON(count > sdb->table.nents);
-	sdb->table.nents = count;
-	sdb->length = blk_rq_payload_bytes(req);
-	return BLK_STS_OK;
-}
+	count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
 
-/*
- * Function:    scsi_init_io()
- *
- * Purpose:     SCSI I/O initialize function.
- *
- * Arguments:   cmd - Command descriptor we wish to initialize
- *
- * Returns:     BLK_STS_OK on success
- *              BLK_STS_RESOURCE if the failure is retryable
- *              BLK_STS_IOERR if the failure is fatal
- */
-blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
-{
-	struct request *rq = cmd->request;
-	blk_status_t ret;
+	if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) {
+		unsigned int pad_len =
+			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
 
-	if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
-		return BLK_STS_IOERR;
+		last_sg->length += pad_len;
+		cmd->extra_len += pad_len;
+	}
 
-	ret = scsi_init_sgtable(rq, &cmd->sdb);
-	if (ret)
-		return ret;
+	if (need_drain) {
+		sg_unmark_end(last_sg);
+		last_sg = sg_next(last_sg);
+		sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
+		sg_mark_end(last_sg);
+
+		cmd->extra_len += sdev->dma_drain_len;
+		count++;
+	}
+
+	BUG_ON(count > cmd->sdb.table.nents);
+	cmd->sdb.table.nents = count;
+	cmd->sdb.length = blk_rq_payload_bytes(rq);
 
 	if (blk_integrity_rq(rq)) {
 		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
-		int ivecs, count;
+		int ivecs;
 
 		if (WARN_ON_ONCE(!prot_sdb)) {
 			/*
@@ -1059,7 +1059,7 @@ blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
 
 	return BLK_STS_OK;
 out_free_sgtables:
-	scsi_mq_free_sgtables(cmd);
+	scsi_free_sgtables(cmd);
 	return ret;
 }
 EXPORT_SYMBOL(scsi_init_io);
@@ -1190,6 +1190,7 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
 		struct request *req)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+	blk_status_t ret;
 
 	if (!blk_rq_bytes(req))
 		cmd->sc_data_direction = DMA_NONE;
@@ -1199,9 +1200,14 @@ static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
 		cmd->sc_data_direction = DMA_FROM_DEVICE;
 
 	if (blk_rq_is_scsi(req))
-		return scsi_setup_scsi_cmnd(sdev, req);
+		ret = scsi_setup_scsi_cmnd(sdev, req);
 	else
-		return scsi_setup_fs_cmnd(sdev, req);
+		ret = scsi_setup_fs_cmnd(sdev, req);
+
+	if (ret != BLK_STS_OK)
+		scsi_free_sgtables(cmd);
+
+	return ret;
 }
 
 static blk_status_t
@@ -1610,12 +1616,7 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx)
 	struct request_queue *q = hctx->queue;
 	struct scsi_device *sdev = q->queuedata;
 
-	if (scsi_dev_queue_ready(q, sdev))
-		return true;
-
-	if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev))
-		blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
-	return false;
+	return scsi_dev_queue_ready(q, sdev);
 }
 
 static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1684,6 +1685,7 @@ out_put_budget:
 	case BLK_STS_OK:
 		break;
 	case BLK_STS_RESOURCE:
+	case BLK_STS_ZONE_RESOURCE:
 		if (atomic_read(&sdev->device_busy) ||
 		    scsi_device_blocked(sdev))
 			ret = BLK_STS_DEV_RESOURCE;
@@ -1870,6 +1872,7 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
 int scsi_mq_setup_tags(struct Scsi_Host *shost)
 {
 	unsigned int cmd_size, sgl_size;
+	struct blk_mq_tag_set *tag_set = &shost->tag_set;
 
 	sgl_size = max_t(unsigned int, sizeof(struct scatterlist),
 				scsi_mq_inline_sgl_size(shost));
@@ -1878,21 +1881,21 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
 		cmd_size += sizeof(struct scsi_data_buffer) +
 			sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT;
 
-	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
+	memset(tag_set, 0, sizeof(*tag_set));
 	if (shost->hostt->commit_rqs)
-		shost->tag_set.ops = &scsi_mq_ops;
+		tag_set->ops = &scsi_mq_ops;
 	else
-		shost->tag_set.ops = &scsi_mq_ops_no_commit;
-	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
-	shost->tag_set.queue_depth = shost->can_queue;
-	shost->tag_set.cmd_size = cmd_size;
-	shost->tag_set.numa_node = NUMA_NO_NODE;
-	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	shost->tag_set.flags |=
+		tag_set->ops = &scsi_mq_ops_no_commit;
+	tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1;
+	tag_set->queue_depth = shost->can_queue;
+	tag_set->cmd_size = cmd_size;
+	tag_set->numa_node = NUMA_NO_NODE;
+	tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
+	tag_set->flags |=
 		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
-	shost->tag_set.driver_data = shost;
+	tag_set->driver_data = shost;
 
-	return blk_mq_alloc_tag_set(&shost->tag_set);
+	return blk_mq_alloc_tag_set(tag_set);
 }
 
 void scsi_mq_destroy_tags(struct Scsi_Host *shost)
@@ -1921,21 +1924,13 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(scsi_device_from_queue);
 
-/*
- * Function:    scsi_block_requests()
- *
- * Purpose:     Utility function used by low-level drivers to prevent further
- *              commands from being queued to the device.
- *
- * Arguments:   shost - Host in question
- *
- * Returns:     Nothing
- *
- * Lock status: No locks are assumed held.
+/**
+ * scsi_block_requests - Utility function used by low-level drivers to prevent
+ * further commands from being queued to the device.
+ * @shost: host in question
  *
- * Notes:       There is no timer nor any other means by which the requests
- *              get unblocked other than the low-level driver calling
- *              scsi_unblock_requests().
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests().
  */
 void scsi_block_requests(struct Scsi_Host *shost)
 {
@@ -1943,25 +1938,15 @@
 }
 EXPORT_SYMBOL(scsi_block_requests);
 
-/*
- * Function:    scsi_unblock_requests()
- *
- * Purpose:     Utility function used by low-level drivers to allow further
- *              commands from being queued to the device.
- *
- * Arguments:   shost - Host in question
- *
- * Returns:     Nothing
- *
- * Lock status: No locks are assumed held.
- *
- * Notes:       There is no timer nor any other means by which the requests
- *              get unblocked other than the low-level driver calling
- *              scsi_unblock_requests().
- *
- *              This is done as an API function so that changes to the
- *              internals of the scsi mid-layer won't require wholesale
- *              changes to drivers that use this feature.
+/**
+ * scsi_unblock_requests - Utility function used by low-level drivers to allow
+ * further commands to be queued to the device.
+ * @shost: host in question
+ *
+ * There is no timer nor any other means by which the requests get unblocked
+ * other than the low-level driver calling scsi_unblock_requests(). This is done
+ * as an API function so that changes to the internals of the scsi mid-layer
+ * won't require wholesale changes to drivers that use this feature.
  */
 void scsi_unblock_requests(struct Scsi_Host *shost)
 {
@@ -2842,11 +2827,27 @@ scsi_host_block(struct Scsi_Host *shost)
 	struct scsi_device *sdev;
 	int ret = 0;
 
+	/*
+	 * Call scsi_internal_device_block_nowait so we can avoid
+	 * calling synchronize_rcu() for each LUN.
+	 */
 	shost_for_each_device(sdev, shost) {
-		ret = scsi_internal_device_block(sdev);
+		mutex_lock(&sdev->state_mutex);
+		ret = scsi_internal_device_block_nowait(sdev);
+		mutex_unlock(&sdev->state_mutex);
 		if (ret)
 			break;
 	}
+
+	/*
+	 * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so
+	 * calling synchronize_rcu() once is enough.
+	 */
+	WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING);
+
+	if (!ret)
+		synchronize_rcu();
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(scsi_host_block);
@@ -2859,8 +2860,10 @@ scsi_host_unblock(struct Scsi_Host *shost, int new_state)
 
 	shost_for_each_device(sdev, shost) {
 		ret = scsi_internal_device_unblock(sdev, new_state);
-		if (ret)
+		if (ret) {
+			scsi_device_put(sdev);
 			break;
+		}
 	}
 	return ret;
 }