author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-06 22:10:33 +0300 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-06 22:10:33 +0300 |
commit | 90311148415ab23f5767fbb577a012d4405f12e5 (patch) | |
tree | b66d2fe65409b949bb559d628bcf72c443ef1264 /drivers/scsi/scsi_lib.c | |
parent | 3a564bb3a8a6950e18b1f5d209bda39fc3831074 (diff) | |
parent | c345c6ca13825d1a15de5399226802433dd30f8c (diff) | |
download | linux-90311148415ab23f5767fbb577a012d4405f12e5.tar.xz |
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This is mostly updates of the usual suspects: lpfc, qla2xxx, bnx2fc,
qedf, hpsa, hisi_sas, smartpqi, cxlflash, aacraid, csiostor along with
a host of minor and miscellaneous changes"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (276 commits)
qla2xxx: Fix NVMe entry_type for iocb packet on BE system
scsi: qla2xxx: avoid unused-function warning
scsi: snic: fix a couple of spelling mistakes/typos
scsi: qla2xxx: fix a bunch of typos and spelling mistakes
scsi: lpfc: don't double count abort errors
scsi: lpfc: spin_lock_irq() is not nestable
scsi: hisi_sas: optimise DMA slot memory
scsi: ibmvfc: constify dev_pm_ops structures.
scsi: ibmvscsi: constify dev_pm_ops structures.
scsi: cxlflash: Update debug prints in reset handlers
scsi: cxlflash: Update send_tmf() parameters
scsi: cxlflash: Avoid double free of character device
scsi: Add STARGET_CREATED_REMOVE state to scsi_target_state
scsi: ses: do not add a device to an enclosure if enclosure_add_links() fails.
scsi: ufs: flush eh_work when eh_work scheduled.
scsi: qla2xxx: Protect access to qpair members with qpair->qp_lock
scsi: sun_esp: fix device reference leaks
scsi: fnic: changing queue command to return result DID_IMM_RETRY when rport is init
scsi: fnic: correct speed display and add support for 25,40 and 100G
scsi: fnic: added timestamp reporting in fnic debug stats
...
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 306 |
1 file changed, 191 insertions, 115 deletions
```diff
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 550e29f903b7..f6097b89d5d3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -45,23 +45,23 @@ static struct kmem_cache *scsi_sense_isadma_cache;
 static DEFINE_MUTEX(scsi_sense_cache_mutex);
 
 static inline struct kmem_cache *
-scsi_select_sense_cache(struct Scsi_Host *shost)
+scsi_select_sense_cache(bool unchecked_isa_dma)
 {
-	return shost->unchecked_isa_dma ?
-		scsi_sense_isadma_cache : scsi_sense_cache;
+	return unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache;
 }
 
-static void scsi_free_sense_buffer(struct Scsi_Host *shost,
-		unsigned char *sense_buffer)
+static void scsi_free_sense_buffer(bool unchecked_isa_dma,
+		unsigned char *sense_buffer)
 {
-	kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
+	kmem_cache_free(scsi_select_sense_cache(unchecked_isa_dma),
+			sense_buffer);
 }
 
-static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
+static unsigned char *scsi_alloc_sense_buffer(bool unchecked_isa_dma,
 	gfp_t gfp_mask, int numa_node)
 {
-	return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
-			numa_node);
+	return kmem_cache_alloc_node(scsi_select_sense_cache(unchecked_isa_dma),
+				     gfp_mask, numa_node);
 }
 
 int scsi_init_sense_cache(struct Scsi_Host *shost)
@@ -69,7 +69,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
 	struct kmem_cache *cache;
 	int ret = 0;
 
-	cache = scsi_select_sense_cache(shost);
+	cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
 	if (cache)
 		return 0;
 
@@ -583,19 +583,9 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
 
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 {
-	struct scsi_device *sdev = cmd->device;
-	struct Scsi_Host *shost = sdev->host;
-	unsigned long flags;
-
 	scsi_mq_free_sgtables(cmd);
 	scsi_uninit_cmd(cmd);
-
-	if (shost->use_cmd_list) {
-		BUG_ON(list_empty(&cmd->list));
-		spin_lock_irqsave(&sdev->list_lock, flags);
-		list_del_init(&cmd->list);
-		spin_unlock_irqrestore(&sdev->list_lock, flags);
-	}
+	scsi_del_cmd_from_list(cmd);
 }
 
 /*
@@ -1129,12 +1119,41 @@ void scsi_initialize_rq(struct request *rq)
 }
 EXPORT_SYMBOL(scsi_initialize_rq);
 
+/* Add a command to the list used by the aacraid and dpt_i2o drivers */
+void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
+{
+	struct scsi_device *sdev = cmd->device;
+	struct Scsi_Host *shost = sdev->host;
+	unsigned long flags;
+
+	if (shost->use_cmd_list) {
+		spin_lock_irqsave(&sdev->list_lock, flags);
+		list_add_tail(&cmd->list, &sdev->cmd_list);
+		spin_unlock_irqrestore(&sdev->list_lock, flags);
+	}
+}
+
+/* Remove a command from the list used by the aacraid and dpt_i2o drivers */
+void scsi_del_cmd_from_list(struct scsi_cmnd *cmd)
+{
+	struct scsi_device *sdev = cmd->device;
+	struct Scsi_Host *shost = sdev->host;
+	unsigned long flags;
+
+	if (shost->use_cmd_list) {
+		spin_lock_irqsave(&sdev->list_lock, flags);
+		BUG_ON(list_empty(&cmd->list));
+		list_del_init(&cmd->list);
+		spin_unlock_irqrestore(&sdev->list_lock, flags);
+	}
+}
+
 /* Called after a request has been started. */
 void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 {
 	void *buf = cmd->sense_buffer;
 	void *prot = cmd->prot_sdb;
-	unsigned long flags;
+	unsigned int unchecked_isa_dma = cmd->flags & SCMD_UNCHECKED_ISA_DMA;
 
 	/* zero out the cmd, except for the embedded scsi_request */
 	memset((char *)cmd + sizeof(cmd->req), 0,
@@ -1143,12 +1162,11 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
 	cmd->device = dev;
 	cmd->sense_buffer = buf;
 	cmd->prot_sdb = prot;
+	cmd->flags = unchecked_isa_dma;
 	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
 	cmd->jiffies_at_alloc = jiffies;
 
-	spin_lock_irqsave(&dev->list_lock, flags);
-	list_add_tail(&cmd->list, &dev->cmd_list);
-	spin_unlock_irqrestore(&dev->list_lock, flags);
+	scsi_add_cmd_to_list(cmd);
 }
 
 static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
@@ -1837,46 +1855,33 @@ static inline blk_status_t prep_to_mq(int ret)
 	}
 }
 
+/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
+static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
+{
+	return min_t(unsigned int, shost->sg_tablesize, SG_CHUNK_SIZE) *
+		sizeof(struct scatterlist);
+}
+
 static int scsi_mq_prep_fn(struct request *req)
 {
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
 	struct scsi_device *sdev = req->q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
-	unsigned char *sense_buf = cmd->sense_buffer;
 	struct scatterlist *sg;
 
-	/* zero out the cmd, except for the embedded scsi_request */
-	memset((char *)cmd + sizeof(cmd->req), 0,
-		sizeof(*cmd) - sizeof(cmd->req) + shost->hostt->cmd_size);
+	scsi_init_command(sdev, cmd);
 
 	req->special = cmd;
 	cmd->request = req;
-	cmd->device = sdev;
-	cmd->sense_buffer = sense_buf;
 	cmd->tag = req->tag;
-	cmd->prot_op = SCSI_PROT_NORMAL;
-	INIT_LIST_HEAD(&cmd->list);
-	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
-	cmd->jiffies_at_alloc = jiffies;
-
-	if (shost->use_cmd_list) {
-		spin_lock_irq(&sdev->list_lock);
-		list_add_tail(&cmd->list, &sdev->cmd_list);
-		spin_unlock_irq(&sdev->list_lock);
-	}
-
 	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
 	cmd->sdb.table.sgl = sg;
 
 	if (scsi_host_get_prot(shost)) {
-		cmd->prot_sdb = (void *)sg +
-			min_t(unsigned int,
-			      shost->sg_tablesize, SG_CHUNK_SIZE) *
-			sizeof(struct scatterlist);
 		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
 
 		cmd->prot_sdb->table.sgl =
@@ -2000,23 +2005,34 @@ static int scsi_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, unsigned int numa_node)
 {
 	struct Scsi_Host *shost = set->driver_data;
+	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+	struct scatterlist *sg;
 
-	cmd->sense_buffer =
-		scsi_alloc_sense_buffer(shost, GFP_KERNEL, numa_node);
+	if (unchecked_isa_dma)
+		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
+	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma,
+						    GFP_KERNEL, numa_node);
 	if (!cmd->sense_buffer)
 		return -ENOMEM;
 	cmd->req.sense = cmd->sense_buffer;
+
+	if (scsi_host_get_prot(shost)) {
+		sg = (void *)cmd + sizeof(struct scsi_cmnd) +
+			shost->hostt->cmd_size;
+		cmd->prot_sdb = (void *)sg + scsi_mq_sgl_size(shost);
+	}
+
 	return 0;
 }
 
 static void scsi_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx)
 {
-	struct Scsi_Host *shost = set->driver_data;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
-	scsi_free_sense_buffer(shost, cmd->sense_buffer);
+	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
+			       cmd->sense_buffer);
 }
 
 static int scsi_map_queues(struct blk_mq_tag_set *set)
@@ -2091,11 +2107,15 @@ EXPORT_SYMBOL_GPL(__scsi_init_queue);
 static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
 {
 	struct Scsi_Host *shost = q->rq_alloc_data;
+	const bool unchecked_isa_dma = shost->unchecked_isa_dma;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	memset(cmd, 0, sizeof(*cmd));
 
-	cmd->sense_buffer = scsi_alloc_sense_buffer(shost, gfp, NUMA_NO_NODE);
+	if (unchecked_isa_dma)
+		cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
+	cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
+						    NUMA_NO_NODE);
 	if (!cmd->sense_buffer)
 		goto fail;
 	cmd->req.sense = cmd->sense_buffer;
@@ -2109,19 +2129,19 @@ static int scsi_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
 	return 0;
 
 fail_free_sense:
-	scsi_free_sense_buffer(shost, cmd->sense_buffer);
+	scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
 fail:
 	return -ENOMEM;
 }
 
 static void scsi_exit_rq(struct request_queue *q, struct request *rq)
 {
-	struct Scsi_Host *shost = q->rq_alloc_data;
 	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
 
 	if (cmd->prot_sdb)
 		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
-	scsi_free_sense_buffer(shost, cmd->sense_buffer);
+	scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
+			       cmd->sense_buffer);
 }
 
 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
@@ -2179,12 +2199,9 @@ struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
 
 int scsi_mq_setup_tags(struct Scsi_Host *shost)
 {
-	unsigned int cmd_size, sgl_size, tbl_size;
+	unsigned int cmd_size, sgl_size;
 
-	tbl_size = shost->sg_tablesize;
-	if (tbl_size > SG_CHUNK_SIZE)
-		tbl_size = SG_CHUNK_SIZE;
-	sgl_size = tbl_size * sizeof(struct scatterlist);
+	sgl_size = scsi_mq_sgl_size(shost);
 	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
 	if (scsi_host_get_prot(shost))
 		cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
@@ -2614,7 +2631,6 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		case SDEV_QUIESCE:
 		case SDEV_OFFLINE:
 		case SDEV_TRANSPORT_OFFLINE:
-		case SDEV_BLOCK:
 			break;
 		default:
 			goto illegal;
@@ -2628,6 +2644,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		case SDEV_OFFLINE:
 		case SDEV_TRANSPORT_OFFLINE:
 		case SDEV_CANCEL:
+		case SDEV_BLOCK:
 		case SDEV_CREATED_BLOCK:
 			break;
 		default:
@@ -2871,7 +2888,12 @@ static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
 int
 scsi_device_quiesce(struct scsi_device *sdev)
 {
-	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+	int err;
+
+	mutex_lock(&sdev->state_mutex);
+	err = scsi_device_set_state(sdev, SDEV_QUIESCE);
+	mutex_unlock(&sdev->state_mutex);
+
 	if (err)
 		return err;
 
@@ -2899,10 +2921,11 @@ void scsi_device_resume(struct scsi_device *sdev)
 	 * so assume the state is being managed elsewhere (for example
 	 * device deleted during suspend)
 	 */
-	if (sdev->sdev_state != SDEV_QUIESCE ||
-	    scsi_device_set_state(sdev, SDEV_RUNNING))
-		return;
-	scsi_run_queue(sdev->request_queue);
+	mutex_lock(&sdev->state_mutex);
+	if (sdev->sdev_state == SDEV_QUIESCE &&
+	    scsi_device_set_state(sdev, SDEV_RUNNING) == 0)
+		scsi_run_queue(sdev->request_queue);
+	mutex_unlock(&sdev->state_mutex);
 }
 EXPORT_SYMBOL(scsi_device_resume);
 
@@ -2933,28 +2956,20 @@ scsi_target_resume(struct scsi_target *starget)
 EXPORT_SYMBOL(scsi_target_resume);
 
 /**
- * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
- * @sdev: device to block
- * @wait: Whether or not to wait until ongoing .queuecommand() /
- *	.queue_rq() calls have finished.
- *
- * Block request made by scsi lld's to temporarily stop all
- * scsi commands on the specified device. May sleep.
+ * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
+ * @sdev: device to block
  *
- * Returns zero if successful or error if not
+ * Pause SCSI command processing on the specified device. Does not sleep.
  *
- * Notes:
- *	This routine transitions the device to the SDEV_BLOCK state
- *	(which must be a legal transition). When the device is in this
- *	state, all commands are deferred until the scsi lld reenables
- *	the device with scsi_device_unblock or device_block_tmo fires.
+ * Returns zero if successful or a negative error code upon failure.
  *
- * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
- * scsi_internal_device_block() has blocked a SCSI device and also
- * remove the rport mutex lock and unlock calls from srp_queuecommand().
+ * Notes:
+ * This routine transitions the device to the SDEV_BLOCK state (which must be
+ * a legal transition). When the device is in this state, command processing
+ * is paused until the device leaves the SDEV_BLOCK state. See also
+ * scsi_internal_device_unblock_nowait().
  */
-int
-scsi_internal_device_block(struct scsi_device *sdev, bool wait)
+int scsi_internal_device_block_nowait(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
 	unsigned long flags;
@@ -2974,45 +2989,86 @@ scsi_internal_device_block(struct scsi_device *sdev, bool wait)
 	 * request queue.
 	 */
 	if (q->mq_ops) {
-		if (wait)
-			blk_mq_quiesce_queue(q);
-		else
-			blk_mq_quiesce_queue_nowait(q);
+		blk_mq_quiesce_queue_nowait(q);
 	} else {
 		spin_lock_irqsave(q->queue_lock, flags);
 		blk_stop_queue(q);
 		spin_unlock_irqrestore(q->queue_lock, flags);
-		if (wait)
-			scsi_wait_for_queuecommand(sdev);
 	}
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(scsi_internal_device_block);
-
+EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
+
 /**
- * scsi_internal_device_unblock - resume a device after a block request
- * @sdev: device to resume
- * @new_state: state to set devices to after unblocking
+ * scsi_internal_device_block - try to transition to the SDEV_BLOCK state
+ * @sdev: device to block
+ *
+ * Pause SCSI command processing on the specified device and wait until all
+ * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep.
  *
- * Called by scsi lld's or the midlayer to restart the device queue
- * for the previously suspended scsi device. Called from interrupt or
- * normal process context.
+ * Returns zero if successful or a negative error code upon failure.
  *
- * Returns zero if successful or error if not.
+ * Note:
+ * This routine transitions the device to the SDEV_BLOCK state (which must be
+ * a legal transition). When the device is in this state, command processing
+ * is paused until the device leaves the SDEV_BLOCK state. See also
+ * scsi_internal_device_unblock().
  *
- * Notes:
- *	This routine transitions the device to the SDEV_RUNNING state
- *	or to one of the offline states (which must be a legal transition)
- *	allowing the midlayer to goose the queue for this device.
+ * To do: avoid that scsi_send_eh_cmnd() calls queuecommand() after
+ * scsi_internal_device_block() has blocked a SCSI device and also
+ * remove the rport mutex lock and unlock calls from srp_queuecommand().
  */
-int
-scsi_internal_device_unblock(struct scsi_device *sdev,
-			     enum scsi_device_state new_state)
+static int scsi_internal_device_block(struct scsi_device *sdev)
 {
-	struct request_queue *q = sdev->request_queue;
+	struct request_queue *q = sdev->request_queue;
+	int err;
+
+	mutex_lock(&sdev->state_mutex);
+	err = scsi_internal_device_block_nowait(sdev);
+	if (err == 0) {
+		if (q->mq_ops)
+			blk_mq_quiesce_queue(q);
+		else
+			scsi_wait_for_queuecommand(sdev);
+	}
+	mutex_unlock(&sdev->state_mutex);
+
+	return err;
+}
+
+void scsi_start_queue(struct scsi_device *sdev)
+{
+	struct request_queue *q = sdev->request_queue;
 	unsigned long flags;
 
+	if (q->mq_ops) {
+		blk_mq_unquiesce_queue(q);
+	} else {
+		spin_lock_irqsave(q->queue_lock, flags);
+		blk_start_queue(q);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
+/**
+ * scsi_internal_device_unblock_nowait - resume a device after a block request
+ * @sdev: device to resume
+ * @new_state: state to set the device to after unblocking
+ *
+ * Restart the device queue for a previously suspended SCSI device. Does not
+ * sleep.
+ *
+ * Returns zero if successful or a negative error code upon failure.
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_RUNNING state or to one of
+ * the offline states (which must be a legal transition) allowing the midlayer
+ * to goose the queue for this device.
+ */
+int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
+					enum scsi_device_state new_state)
+{
 	/*
 	 * Try to transition the scsi device to SDEV_RUNNING or one of the
 	 * offlined states and goose the device queue if successful.
@@ -3030,22 +3086,42 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
 	    sdev->sdev_state != SDEV_OFFLINE)
 		return -EINVAL;
 
-	if (q->mq_ops) {
-		blk_mq_unquiesce_queue(q);
-	} else {
-		spin_lock_irqsave(q->queue_lock, flags);
-		blk_start_queue(q);
-		spin_unlock_irqrestore(q->queue_lock, flags);
-	}
+	scsi_start_queue(sdev);
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
+EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
+
+/**
+ * scsi_internal_device_unblock - resume a device after a block request
+ * @sdev: device to resume
+ * @new_state: state to set the device to after unblocking
+ *
+ * Restart the device queue for a previously suspended SCSI device. May sleep.
+ *
+ * Returns zero if successful or a negative error code upon failure.
+ *
+ * Notes:
+ * This routine transitions the device to the SDEV_RUNNING state or to one of
+ * the offline states (which must be a legal transition) allowing the midlayer
+ * to goose the queue for this device.
+ */
+static int scsi_internal_device_unblock(struct scsi_device *sdev,
+					enum scsi_device_state new_state)
+{
+	int ret;
+
+	mutex_lock(&sdev->state_mutex);
+	ret = scsi_internal_device_unblock_nowait(sdev, new_state);
+	mutex_unlock(&sdev->state_mutex);
+
+	return ret;
+}
 
 static void
 device_block(struct scsi_device *sdev, void *data)
 {
-	scsi_internal_device_block(sdev, true);
+	scsi_internal_device_block(sdev);
 }
 
 static int
```
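Two patterns in the patch above are worth a closer look. First, the sense-buffer helpers no longer take a `struct Scsi_Host *`: the ISA-DMA property is latched into `cmd->flags` as `SCMD_UNCHECKED_ISA_DMA` at init time, so the free path can pick the right cache from the command alone. Below is a minimal userspace C sketch of that shape — the names and the `malloc`/`free` stand-ins for the two kmem caches are illustrative, not the kernel implementation:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define SCMD_UNCHECKED_ISA_DMA (1u << 0)	/* mirrors the kernel flag bit */
#define SENSE_BUFFER_SIZE 96

/* Stand-in for scsi_select_sense_cache(): pick a cache by flag, not host. */
static const char *select_sense_cache(bool unchecked_isa_dma)
{
	return unchecked_isa_dma ? "isadma cache" : "regular cache";
}

static unsigned char *alloc_sense_buffer(bool unchecked_isa_dma)
{
	printf("allocating from %s\n", select_sense_cache(unchecked_isa_dma));
	return malloc(SENSE_BUFFER_SIZE);
}

static void free_sense_buffer(bool unchecked_isa_dma, unsigned char *buf)
{
	/* The flag alone identifies the cache; no host pointer needed. */
	printf("freeing into %s\n", select_sense_cache(unchecked_isa_dma));
	free(buf);
}

struct cmd {
	unsigned int flags;
	unsigned char *sense_buffer;
};

int main(void)
{
	struct cmd cmd = { .flags = SCMD_UNCHECKED_ISA_DMA };

	/* init path: the host property is latched into the command */
	cmd.sense_buffer = alloc_sense_buffer(cmd.flags & SCMD_UNCHECKED_ISA_DMA);
	if (!cmd.sense_buffer)
		return 1;

	/* exit path: recover the cache choice from the command itself */
	free_sense_buffer(cmd.flags & SCMD_UNCHECKED_ISA_DMA, cmd.sense_buffer);
	return 0;
}
```

Presumably the benefit is that the exit paths (`scsi_exit_rq()`, `scsi_exit_request()`) no longer need the host to be reachable when a command is torn down.

Second, the block/unblock paths are split into `*_nowait` primitives and sleeping wrappers that take `sdev->state_mutex` and only then wait for in-flight activity to drain. A sketch of that wrapper shape, again with illustrative names and a `printf` standing in for the real `blk_mq_quiesce_queue()` / `scsi_wait_for_queuecommand()` calls:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool blocked;

/* Does not sleep: only requests the state transition. */
static int device_block_nowait(void)
{
	blocked = true;
	return 0;
}

/* May sleep: serialize the transition, then wait for in-flight work. */
static int device_block(void)
{
	int err;

	pthread_mutex_lock(&state_mutex);
	err = device_block_nowait();
	if (err == 0)
		printf("waiting for in-flight commands to drain\n");
	pthread_mutex_unlock(&state_mutex);

	return err;
}

int main(void)
{
	return device_block();
}
```

The split leaves a variant that non-sleeping contexts can call, while process-context callers get the fully drained guarantee, and the mutex keeps concurrent state changes (quiesce, resume, block, unblock) from racing one another.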