Diffstat (limited to 'drivers/scsi')
76 files changed, 1161 insertions, 882 deletions
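One cleanup visible in the bfa_fc.h hunk below replaces the one-element array u8 value[1] in struct fdmi_attr_s with a C99 flexible array member. The sketch that follows only illustrates that pattern under assumed kernel headers (linux/slab.h, linux/overflow.h, linux/types.h); fdmi_attr_example and fdmi_attr_alloc are hypothetical names, not bfa driver symbols.

/* Illustration only: a flexible array member lets the payload be allocated
 * together with the header in a single kzalloc().
 */
struct fdmi_attr_example {
	__be16 type;
	__be16 len;
	u8 value[];	/* flexible array member replaces u8 value[1] */
};

static struct fdmi_attr_example *fdmi_attr_alloc(size_t payload)
{
	struct fdmi_attr_example *attr;

	/* struct_size() adds the header size and payload bytes with
	 * overflow checking; it is the usual companion to value[].
	 */
	attr = kzalloc(struct_size(attr, value, payload), GFP_KERNEL);
	if (attr)
		attr->len = cpu_to_be16(payload);
	return attr;
}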
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index f2abffce2659..f7b7ffda1161 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -2198,7 +2198,7 @@ static int blogic_slaveconfig(struct scsi_device *dev) static int __init blogic_init(void) { - int adapter_count = 0, drvr_optindex = 0, probeindex; + int drvr_optindex = 0, probeindex; struct blogic_adapter *adapter; int ret = 0; @@ -2368,10 +2368,8 @@ static int __init blogic_init(void) list_del(&myadapter->host_list); scsi_host_put(host); ret = -ENODEV; - } else { + } else scsi_scan_host(host); - adapter_count++; - } } } else { /* diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index dece7d9eb4d3..ca85bddb582b 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -858,7 +858,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) * latency, but a bus reset will reset chip logic. Checking for parity error * is unnecessary because that interrupt is never enabled. A Loss of BSY * condition will clear DMA Mode. We can tell when this occurs because the - * the Busy Monitor interrupt is enabled together with DMA Mode. + * Busy Monitor interrupt is enabled together with DMA Mode. */ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id) diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index 0314e4b9e1fb..a12d693065ce 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -1548,7 +1548,7 @@ enum fdmi_port_attribute_type { struct fdmi_attr_s { __be16 type; __be16 len; - u8 value[1]; + u8 value[]; }; /* diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index be8dfbe13e90..79d4f7ee5bcb 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -2540,6 +2540,35 @@ out: return 0; } +/* + * Set the SCSI device sdev_bflags - sdev_bflags are used by the + * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan + * + * Internally iterates over all the ITNIM's part of the im_port & sets the + * sdev_bflags for the scsi_device associated with LUN #0. + */ +static void bfad_reset_sdev_bflags(struct bfad_im_port_s *im_port, + int lunmask_cfg) +{ + const u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; + struct bfad_itnim_s *itnim; + struct scsi_device *sdev; + unsigned long flags; + + spin_lock_irqsave(im_port->shost->host_lock, flags); + list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) { + sdev = __scsi_device_lookup(im_port->shost, itnim->channel, + itnim->scsi_tgt_id, 0); + if (sdev) { + if (lunmask_cfg == BFA_TRUE) + sdev->sdev_bflags |= scan_flags; + else + sdev->sdev_bflags &= ~scan_flags; + } + } + spin_unlock_irqrestore(im_port->shost->host_lock, flags); +} + /* Function to reset the LUN SCAN mode */ static void bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg) diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index c03b225ea1ba..4353feedf76a 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h @@ -198,30 +198,4 @@ irqreturn_t bfad_intx(int irq, void *dev_id); int bfad_im_bsg_request(struct bsg_job *job); int bfad_im_bsg_timeout(struct bsg_job *job); -/* - * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the - * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan - * - * Internally iterate's over all the ITNIM's part of the im_port & set's the - * sdev_bflags for the scsi_device associated with LUN #0. 
- */ -#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \ - struct scsi_device *__sdev = NULL; \ - struct bfad_itnim_s *__itnim = NULL; \ - u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \ - list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \ - list_entry) { \ - __sdev = scsi_device_lookup((__im_port)->shost, \ - __itnim->channel, \ - __itnim->scsi_tgt_id, 0); \ - if (__sdev) { \ - if ((__lunmask_cfg) == BFA_TRUE) \ - __sdev->sdev_bflags |= scan_flags; \ - else \ - __sdev->sdev_bflags &= ~scan_flags; \ - scsi_device_put(__sdev); \ - } \ - } \ -} while (0) - #endif diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index fe0355c964bc..a516df019c22 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -1051,7 +1051,6 @@ csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q, struct csio_fl_dma_buf flb; struct csio_dma_buf *buf, *fbuf; uint32_t bufsz, len, lastlen = 0; - int n; struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx]; CSIO_DB_ASSERT(flq != NULL); @@ -1071,7 +1070,7 @@ csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q, flb.totlen = len; /* Consume all freelist buffers used for len bytes */ - for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) { + for (fbuf = flb.flbufs; ; fbuf++) { buf = &flq->un.fl.bufs[flq->cidx]; bufsz = csio_wr_fl_bufsz(sge, buf); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 610a51538f03..49cc18a87473 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -354,6 +354,8 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, "%s: port group %x rel port %x\n", ALUA_DH_NAME, group_id, rel_port); + kref_get(&pg->kref); + /* Check for existing port group references */ spin_lock(&h->pg_lock); old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); @@ -373,11 +375,11 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, list_add_rcu(&h->node, &pg->dh_list); spin_unlock_irqrestore(&pg->lock, flags); - alua_rtpg_queue(rcu_dereference_protected(h->pg, - lockdep_is_held(&h->pg_lock)), - sdev, NULL, true); spin_unlock(&h->pg_lock); + alua_rtpg_queue(pg, sdev, NULL, true); + kref_put(&pg->kref, release_port_group); + if (old_pg) kref_put(&old_pg->kref, release_port_group); @@ -811,14 +813,19 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) return SCSI_DH_RETRY; } -static bool alua_rtpg_select_sdev(struct alua_port_group *pg) +/* + * The caller must call scsi_device_put() on the returned pointer if it is not + * NULL. + */ +static struct scsi_device * __must_check +alua_rtpg_select_sdev(struct alua_port_group *pg) { struct alua_dh_data *h; - struct scsi_device *sdev = NULL; + struct scsi_device *sdev = NULL, *prev_sdev; lockdep_assert_held(&pg->lock); if (WARN_ON(!pg->rtpg_sdev)) - return false; + return NULL; /* * RCU protection isn't necessary for dh_list here @@ -845,22 +852,22 @@ static bool alua_rtpg_select_sdev(struct alua_port_group *pg) pr_warn("%s: no device found for rtpg\n", (pg->device_id_len ? 
(char *)pg->device_id_str : "(nameless PG)")); - return false; + return NULL; } sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n"); - scsi_device_put(pg->rtpg_sdev); + prev_sdev = pg->rtpg_sdev; pg->rtpg_sdev = sdev; - return true; + return prev_sdev; } static void alua_rtpg_work(struct work_struct *work) { struct alua_port_group *pg = container_of(work, struct alua_port_group, rtpg_work.work); - struct scsi_device *sdev; + struct scsi_device *sdev, *prev_sdev = NULL; LIST_HEAD(qdata_list); int err = SCSI_DH_OK; struct alua_queue_data *qdata, *tmp; @@ -901,7 +908,7 @@ static void alua_rtpg_work(struct work_struct *work) /* If RTPG failed on the current device, try using another */ if (err == SCSI_DH_RES_TEMP_UNAVAIL && - alua_rtpg_select_sdev(pg)) + (prev_sdev = alua_rtpg_select_sdev(pg))) err = SCSI_DH_IMM_RETRY; if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY || @@ -913,9 +920,7 @@ static void alua_rtpg_work(struct work_struct *work) pg->interval = ALUA_RTPG_RETRY_DELAY; pg->flags |= ALUA_PG_RUN_RTPG; spin_unlock_irqrestore(&pg->lock, flags); - queue_delayed_work(kaluad_wq, &pg->rtpg_work, - pg->interval * HZ); - return; + goto queue_rtpg; } if (err != SCSI_DH_OK) pg->flags &= ~ALUA_PG_RUN_STPG; @@ -930,9 +935,7 @@ static void alua_rtpg_work(struct work_struct *work) pg->interval = 0; pg->flags &= ~ALUA_PG_RUNNING; spin_unlock_irqrestore(&pg->lock, flags); - queue_delayed_work(kaluad_wq, &pg->rtpg_work, - pg->interval * HZ); - return; + goto queue_rtpg; } } @@ -946,6 +949,9 @@ static void alua_rtpg_work(struct work_struct *work) pg->rtpg_sdev = NULL; spin_unlock_irqrestore(&pg->lock, flags); + if (prev_sdev) + scsi_device_put(prev_sdev); + list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) { list_del(&qdata->entry); if (qdata->callback_fn) @@ -957,6 +963,12 @@ static void alua_rtpg_work(struct work_struct *work) spin_unlock_irqrestore(&pg->lock, flags); scsi_device_put(sdev); kref_put(&pg->kref, release_port_group); + return; + +queue_rtpg: + if (prev_sdev) + scsi_device_put(prev_sdev); + queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ); } /** @@ -976,6 +988,9 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, { int start_queue = 0; unsigned long flags; + + might_sleep(); + if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) return false; @@ -986,11 +1001,17 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, force = true; } if (pg->rtpg_sdev == NULL) { - pg->interval = 0; - pg->flags |= ALUA_PG_RUN_RTPG; - kref_get(&pg->kref); - pg->rtpg_sdev = sdev; - start_queue = 1; + struct alua_dh_data *h = sdev->handler_data; + + rcu_read_lock(); + if (h && rcu_dereference(h->pg) == pg) { + pg->interval = 0; + pg->flags |= ALUA_PG_RUN_RTPG; + kref_get(&pg->kref); + pg->rtpg_sdev = sdev; + start_queue = 1; + } + rcu_read_unlock(); } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { pg->flags |= ALUA_PG_RUN_RTPG; /* Do not queue if the worker is already running */ diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c index b08fc8839808..49fd2cfed70c 100644 --- a/drivers/scsi/elx/efct/efct_driver.c +++ b/drivers/scsi/elx/efct/efct_driver.c @@ -42,6 +42,7 @@ efct_device_init(void) rc = efct_scsi_reg_fc_transport(); if (rc) { + efct_scsi_tgt_driver_exit(); pr_err("failed to register to FC host\n"); return rc; } diff --git a/drivers/scsi/elx/libefc/efclib.h b/drivers/scsi/elx/libefc/efclib.h index dde20891c2dd..57e338612812 100644 --- a/drivers/scsi/elx/libefc/efclib.h +++ b/drivers/scsi/elx/libefc/efclib.h @@ -58,10 
+58,12 @@ enum efc_node_send_ls_acc { #define EFC_LINK_STATUS_UP 0 #define EFC_LINK_STATUS_DOWN 1 +enum efc_sm_event; + /* State machine context header */ struct efc_sm_ctx { void (*current_state)(struct efc_sm_ctx *ctx, - u32 evt, void *arg); + enum efc_sm_event evt, void *arg); const char *description; void *app; @@ -365,7 +367,7 @@ struct efc_node { int prev_evt; void (*nodedb_state)(struct efc_sm_ctx *ctx, - u32 evt, void *arg); + enum efc_sm_event evt, void *arg); struct timer_list gidpt_delay_timer; u64 time_last_gidpt_msec; diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 7a4eadad23d7..d7a2c49ff5ee 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -248,8 +248,6 @@ static struct scsi_host_template driver_template = { .sg_tablesize = SG_CHUNK_SIZE, .cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN, - .present = 0, - .emulated = 0, .proc_name = ESAS2R_DRVR_NAME, .change_queue_depth = scsi_change_queue_depth, .max_sectors = 0xFFFF, @@ -637,10 +635,13 @@ static void __exit esas2r_exit(void) esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__); if (esas2r_proc_major > 0) { + struct proc_dir_entry *proc_dir; + esas2r_log(ESAS2R_LOG_INFO, "unregister proc"); - remove_proc_entry(ATTONODE_NAME, - esas2r_proc_host->hostt->proc_dir); + proc_dir = scsi_template_proc_dir(esas2r_proc_host->hostt); + if (proc_dir) + remove_proc_entry(ATTONODE_NAME, proc_dir); unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME); esas2r_proc_major = 0; @@ -730,11 +731,13 @@ const char *esas2r_info(struct Scsi_Host *sh) esas2r_proc_major); if (esas2r_proc_major > 0) { - struct proc_dir_entry *pde; + struct proc_dir_entry *proc_dir; + struct proc_dir_entry *pde = NULL; - pde = proc_create(ATTONODE_NAME, 0, - sh->hostt->proc_dir, - &esas2r_proc_ops); + proc_dir = scsi_template_proc_dir(sh->hostt); + if (proc_dir) + pde = proc_create(ATTONODE_NAME, 0, proc_dir, + &esas2r_proc_ops); if (!pde) { esas2r_log_dev(ESAS2R_LOG_WARN, diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 6ec296321ffc..38774a272e62 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -2491,6 +2491,7 @@ static int __init fcoe_init(void) out_free: mutex_unlock(&fcoe_config_mutex); + fcoe_transport_detach(&fcoe_sw_transport); out_destroy: destroy_workqueue(fcoe_wq); return rc; diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index ddc048069af2..5c8d1ba3f8f3 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -2233,7 +2233,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip) if (fip->probe_tries < FIP_VN_RLIM_COUNT) { fip->probe_tries++; - wait = prandom_u32_max(FIP_VN_PROBE_WAIT); + wait = get_random_u32_below(FIP_VN_PROBE_WAIT); } else wait = FIP_VN_RLIM_INT; mod_timer(&fip->timer, jiffies + msecs_to_jiffies(wait)); @@ -3125,7 +3125,7 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip) fcoe_all_vn2vn, 0); fip->port_ka_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT + - prandom_u32_max(FIP_VN_BEACON_FUZZ)); + get_random_u32_below(FIP_VN_BEACON_FUZZ)); } if (time_before(fip->port_ka_time, next_time)) next_time = fip->port_ka_time; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index af658aa38fed..6260aa5ea6af 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -830,14 +830,15 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent, dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id); error = 
device_register(&ctlr->dev); - if (error) - goto out_del_q2; + if (error) { + destroy_workqueue(ctlr->devloss_work_q); + destroy_workqueue(ctlr->work_q); + put_device(&ctlr->dev); + return NULL; + } return ctlr; -out_del_q2: - destroy_workqueue(ctlr->devloss_work_q); - ctlr->devloss_work_q = NULL; out_del_q: destroy_workqueue(ctlr->work_q); ctlr->work_q = NULL; @@ -1036,16 +1037,16 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr, fcf->selected = new_fcf->selected; error = device_register(&fcf->dev); - if (error) - goto out_del; + if (error) { + put_device(&fcf->dev); + goto out; + } fcf->state = FCOE_FCF_STATE_CONNECTED; list_add_tail(&fcf->peers, &ctlr->fcfs); return fcf; -out_del: - kfree(fcf); out: return NULL; } diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 9aebf4a26b13..6f8a52a1b808 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -104,6 +104,7 @@ enum { enum dev_status { HISI_SAS_DEV_INIT, HISI_SAS_DEV_NORMAL, + HISI_SAS_DEV_NCQ_ERR, }; enum { diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 699b07abb6b0..41ba22f6c7f0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -177,22 +177,22 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx) } static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, - struct scsi_cmnd *scsi_cmnd) + struct request *rq) { int index; void *bitmap = hisi_hba->slot_index_tags; - if (scsi_cmnd) - return scsi_cmd_to_rq(scsi_cmnd)->tag; + if (rq) + return rq->tag + HISI_SAS_RESERVED_IPTT; spin_lock(&hisi_hba->lock); - index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, + index = find_next_zero_bit(bitmap, HISI_SAS_RESERVED_IPTT, hisi_hba->last_slot_index + 1); - if (index >= hisi_hba->slot_index_count) { + if (index >= HISI_SAS_RESERVED_IPTT) { index = find_next_zero_bit(bitmap, - hisi_hba->slot_index_count, - HISI_SAS_UNRESERVED_IPTT); - if (index >= hisi_hba->slot_index_count) { + HISI_SAS_RESERVED_IPTT, + 0); + if (index >= HISI_SAS_RESERVED_IPTT) { spin_unlock(&hisi_hba->lock); return -SAS_QUEUE_FULL; } @@ -461,11 +461,11 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) struct asd_sas_port *sas_port = device->port; struct hisi_sas_device *sas_dev = device->lldd_dev; bool internal_abort = sas_is_internal_abort(task); - struct scsi_cmnd *scmd = NULL; struct hisi_sas_dq *dq = NULL; struct hisi_sas_port *port; struct hisi_hba *hisi_hba; struct hisi_sas_slot *slot; + struct request *rq = NULL; struct device *dev; int rc; @@ -520,22 +520,12 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) return -ECOMM; } - if (task->uldd_task) { - struct ata_queued_cmd *qc; - - if (dev_is_sata(device)) { - qc = task->uldd_task; - scmd = qc->scsicmd; - } else { - scmd = task->uldd_task; - } - } - - if (scmd) { + rq = sas_task_find_rq(task); + if (rq) { unsigned int dq_index; u32 blk_tag; - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + blk_tag = blk_mq_unique_tag(rq); dq_index = blk_mq_unique_tag_to_hwq(blk_tag); dq = &hisi_hba->dq[dq_index]; } else { @@ -580,7 +570,7 @@ static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) if (!internal_abort && hisi_hba->hw->slot_index_alloc) rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); else - rc = hisi_sas_slot_index_alloc(hisi_hba, scmd); + rc = hisi_sas_slot_index_alloc(hisi_hba, rq); if (rc < 0) goto 
err_out_dif_dma_unmap; @@ -792,22 +782,14 @@ static int hisi_sas_dev_found(struct domain_device *device) if (parent_dev && dev_is_expander(parent_dev->dev_type)) { int phy_no; - u8 phy_num = parent_dev->ex_dev.num_phys; - struct ex_phy *phy; - - for (phy_no = 0; phy_no < phy_num; phy_no++) { - phy = &parent_dev->ex_dev.ex_phy[phy_no]; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(device->sas_addr)) - break; - } - if (phy_no == phy_num) { + phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device); + if (phy_no < 0) { dev_info(dev, "dev found: no attached " "dev:%016llx at ex:%016llx\n", SAS_ADDR(device->sas_addr), SAS_ADDR(parent_dev->sas_addr)); - rc = -EINVAL; + rc = phy_no; goto err_out; } } @@ -1341,7 +1323,6 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) { - struct sas_ha_struct *sas_ha = &hisi_hba->sha; struct asd_sas_port *_sas_port = NULL; int phy_no; @@ -1370,12 +1351,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL); } } - /* - * Ensure any bcast events are processed prior to calling async nexus - * reset calls from hisi_sas_clear_nexus_ha() -> - * hisi_sas_async_I_T_nexus_reset() - */ - sas_drain_work(sas_ha); } static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba) @@ -1547,6 +1522,7 @@ static int hisi_sas_abort_task(struct sas_task *task) struct hisi_sas_internal_abort_data internal_abort_data = { false }; struct domain_device *device = task->dev; struct hisi_sas_device *sas_dev = device->lldd_dev; + struct hisi_sas_slot *slot = task->lldd_task; struct hisi_hba *hisi_hba; struct device *dev; int rc = TMF_RESP_FUNC_FAILED; @@ -1560,7 +1536,6 @@ static int hisi_sas_abort_task(struct sas_task *task) spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { - struct hisi_sas_slot *slot = task->lldd_task; struct hisi_sas_cq *cq; if (slot) { @@ -1578,8 +1553,7 @@ static int hisi_sas_abort_task(struct sas_task *task) task->task_state_flags |= SAS_TASK_STATE_ABORTED; spin_unlock_irqrestore(&task->task_state_lock, flags); - if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { - struct hisi_sas_slot *slot = task->lldd_task; + if (slot && task->task_proto & SAS_PROTOCOL_SSP) { u16 tag = slot->idx; int rc2; @@ -1605,17 +1579,29 @@ static int hisi_sas_abort_task(struct sas_task *task) } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { if (task->dev->dev_type == SAS_SATA_DEV) { + struct ata_queued_cmd *qc = task->uldd_task; + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) { dev_err(dev, "abort task: internal abort failed\n"); goto out; } hisi_sas_dereg_device(hisi_hba, device); - rc = hisi_sas_softreset_ata_disk(device); + + /* + * If an ATA internal command times out in ATA EH, it + * need to execute soft reset, so check the scsicmd + */ + if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) && + qc && qc->scsicmd) { + hisi_sas_do_release_task(hisi_hba, task, slot); + rc = TMF_RESP_FUNC_COMPLETE; + } else { + rc = hisi_sas_softreset_ata_disk(device); + } } - } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { + } else if (slot && task->task_proto & SAS_PROTOCOL_SMP) { /* SMP */ - struct hisi_sas_slot *slot = task->lldd_task; u32 tag = slot->idx; struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; @@ -1708,13 +1694,15 @@ static int 
hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) return rc; } + /* Remote phy */ if (rc) return rc; - /* Remote phy */ if (dev_is_sata(device)) { - rc = sas_ata_wait_after_reset(device, - HISI_SAS_WAIT_PHYUP_TIMEOUT); + struct ata_link *link = &device->sata_dev.ap->link; + + rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT, + smp_ata_check_ready_type); } else { msleep(2000); } @@ -1729,6 +1717,9 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) struct device *dev = hisi_hba->dev; int rc; + if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) + sas_dev->dev_status = HISI_SAS_DEV_NORMAL; + rc = hisi_sas_internal_task_abort_dev(sas_dev, false); if (rc < 0) { dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc); @@ -1823,14 +1814,12 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) struct hisi_hba *hisi_hba = sas_ha->lldd_ha; HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); ASYNC_DOMAIN_EXCLUSIVE(async); - int i, ret; + int i; queue_work(hisi_hba->wq, &r.work); wait_for_completion(r.completion); - if (!r.done) { - ret = TMF_RESP_FUNC_FAILED; - goto out; - } + if (!r.done) + return TMF_RESP_FUNC_FAILED; for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; @@ -1847,9 +1836,7 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) async_synchronize_full_domain(&async); hisi_sas_release_tasks(hisi_hba); - ret = TMF_RESP_FUNC_COMPLETE; -out: - return ret; + return TMF_RESP_FUNC_COMPLETE; } static int hisi_sas_query_task(struct sas_task *task) @@ -1997,14 +1984,10 @@ void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) { struct asd_sas_phy *sas_phy = &phy->sas_phy; struct hisi_hba *hisi_hba = phy->hisi_hba; - struct sas_ha_struct *sha = &hisi_hba->sha; if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) return; - if (test_bit(SAS_HA_FROZEN, &sha->state)) - return; - sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast); @@ -2220,7 +2203,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) if (!hisi_hba->sata_breakpoint) goto err_out; - hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; + hisi_hba->last_slot_index = 0; hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); if (!hisi_hba->wq) { diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index d56b4bfd2767..0c3fcb807806 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -404,6 +404,11 @@ #define CMPLT_HDR_CMPLT_MSK (0x3 << CMPLT_HDR_CMPLT_OFF) #define CMPLT_HDR_ERROR_PHASE_OFF 2 #define CMPLT_HDR_ERROR_PHASE_MSK (0xff << CMPLT_HDR_ERROR_PHASE_OFF) +/* bit[9:2] Error Phase */ +#define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF \ + 8 +#define ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK \ + (0x1 << ERR_PHASE_RESPONSE_FRAME_REV_STAGE_OFF) #define CMPLT_HDR_RSPNS_XFRD_OFF 10 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) #define CMPLT_HDR_RSPNS_GOOD_OFF 11 @@ -423,8 +428,17 @@ #define CMPLT_HDR_DEV_ID_OFF 16 #define CMPLT_HDR_DEV_ID_MSK (0xffff << CMPLT_HDR_DEV_ID_OFF) /* dw3 */ +#define SATA_DISK_IN_ERROR_STATUS_OFF 8 +#define SATA_DISK_IN_ERROR_STATUS_MSK (0x1 << SATA_DISK_IN_ERROR_STATUS_OFF) +#define CMPLT_HDR_SATA_DISK_ERR_OFF 16 +#define CMPLT_HDR_SATA_DISK_ERR_MSK (0x1 << CMPLT_HDR_SATA_DISK_ERR_OFF) #define CMPLT_HDR_IO_IN_TARGET_OFF 17 #define CMPLT_HDR_IO_IN_TARGET_MSK (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF) +/* bit[23:18] ERR_FIS_ATA_STATUS */ +#define 
FIS_ATA_STATUS_ERR_OFF 18 +#define FIS_ATA_STATUS_ERR_MSK (0x1 << FIS_ATA_STATUS_ERR_OFF) +#define FIS_TYPE_SDB_OFF 31 +#define FIS_TYPE_SDB_MSK (0x1 << FIS_TYPE_SDB_OFF) /* ITCT header */ /* qw0 */ @@ -2148,6 +2162,18 @@ static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p) return IRQ_HANDLED; } +static bool is_ncq_err_v3_hw(struct hisi_sas_complete_v3_hdr *complete_hdr) +{ + u32 dw0, dw3; + + dw0 = le32_to_cpu(complete_hdr->dw0); + dw3 = le32_to_cpu(complete_hdr->dw3); + + return (dw0 & ERR_PHASE_RESPONSE_FRAME_REV_STAGE_MSK) && + (dw3 & FIS_TYPE_SDB_MSK) && + (dw3 & FIS_ATA_STATUS_ERR_MSK); +} + static bool slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) @@ -2195,7 +2221,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; - } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) { + } else if ((dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) || + (dw3 & SATA_DISK_IN_ERROR_STATUS_MSK)) { ts->stat = SAS_PHY_DOWN; slot->abort = 1; } else { @@ -2381,14 +2408,34 @@ static irqreturn_t cq_thread_v3_hw(int irq_no, void *p) while (rd_point != wr_point) { struct hisi_sas_complete_v3_hdr *complete_hdr; struct device *dev = hisi_hba->dev; - u32 dw1; + u32 dw0, dw1, dw3; int iptt; complete_hdr = &complete_queue[rd_point]; + dw0 = le32_to_cpu(complete_hdr->dw0); dw1 = le32_to_cpu(complete_hdr->dw1); + dw3 = le32_to_cpu(complete_hdr->dw3); iptt = dw1 & CMPLT_HDR_IPTT_MSK; - if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { + if (unlikely((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) && + (dw3 & CMPLT_HDR_SATA_DISK_ERR_MSK)) { + int device_id = (dw1 & CMPLT_HDR_DEV_ID_MSK) >> + CMPLT_HDR_DEV_ID_OFF; + struct hisi_sas_itct *itct = + &hisi_hba->itct[device_id]; + struct hisi_sas_device *sas_dev = + &hisi_hba->devices[device_id]; + struct domain_device *device = sas_dev->sas_device; + + dev_err(dev, "erroneous completion disk err dev id=%d sas_addr=0x%llx CQ hdr: 0x%x 0x%x 0x%x 0x%x\n", + device_id, itct->sas_addr, dw0, dw1, + complete_hdr->act, dw3); + + if (is_ncq_err_v3_hw(complete_hdr)) + sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR; + + sas_ata_device_link_abort(device, true); + } else if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) { slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 9857dba09c95..12346e2297fd 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -519,7 +519,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) "failed to create tmf workq\n"); goto fail; } - scsi_proc_hostdir_add(shost->hostt); + if (scsi_proc_hostdir_add(shost->hostt) < 0) + goto fail; return shost; fail: /* diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index f8e832b1bc46..4dbf51e2623a 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -8925,7 +8925,7 @@ clean1: /* wq/aer/h */ destroy_workqueue(h->monitor_ctlr_wq); h->monitor_ctlr_wq = NULL; } - kfree(h); + hpda_free_ctlr_info(h); return rc; } @@ -9786,7 +9786,8 @@ static int hpsa_add_sas_host(struct ctlr_info *h) return 0; free_sas_phy: - hpsa_free_sas_phy(hpsa_sas_phy); + sas_phy_free(hpsa_sas_phy->phy); + kfree(hpsa_sas_phy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); free_sas_node: @@ -9822,10 +9823,12 @@ static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); if (rc) 
- goto free_sas_port; + goto free_sas_rphy; return 0; +free_sas_rphy: + sas_rphy_free(rphy); free_sas_port: hpsa_free_sas_port(hpsa_sas_port); device->sas_port = NULL; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 9d01a3e3c26a..2022ffb45041 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -10872,11 +10872,19 @@ static struct notifier_block ipr_notifier = { **/ static int __init ipr_init(void) { + int rc; + ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", IPR_DRIVER_VERSION, IPR_DRIVER_DATE); register_reboot_notifier(&ipr_notifier); - return pci_register_driver(&ipr_driver); + rc = pci_register_driver(&ipr_driver); + if (rc) { + unregister_reboot_notifier(&ipr_notifier); + return rc; + } + + return 0; } /** diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 942fc60f7c21..0f32ded246d0 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -75,7 +75,6 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) struct fc_seq_els_data rjt_data; unsigned int len; int redisc = 0; - enum fc_els_rscn_ev_qual ev_qual; enum fc_els_rscn_addr_fmt fmt; LIST_HEAD(disc_ports); struct fc_disc_port *dp, *next; @@ -107,8 +106,6 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) goto reject; for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) { - ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT; - ev_qual &= ELS_RSCN_EV_QUAL_MASK; fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT; fmt &= ELS_RSCN_ADDR_FMT_MASK; /* diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index d95f4bcdeb2e..ef2fc860257e 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -2071,9 +2071,9 @@ static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) return 0; } -enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) +enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) { - enum blk_eh_timer_return rc = BLK_EH_DONE; + enum scsi_timeout_action rc = SCSI_EH_NOT_HANDLED; struct iscsi_task *task = NULL, *running_task; struct iscsi_cls_session *cls_session; struct iscsi_session *session; @@ -2093,7 +2093,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * Raced with completion. Blk layer has taken ownership * so let timeout code complete it now. */ - rc = BLK_EH_DONE; + rc = SCSI_EH_NOT_HANDLED; spin_unlock(&session->back_lock); goto done; } @@ -2102,7 +2102,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * Racing with the completion path right now, so give it more * time so that path can complete it like normal. */ - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; task = NULL; spin_unlock(&session->back_lock); goto done; @@ -2120,21 +2120,21 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) if (unlikely(system_state != SYSTEM_RUNNING)) { sc->result = DID_NO_CONNECT << 16; ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); - rc = BLK_EH_DONE; + rc = SCSI_EH_NOT_HANDLED; goto done; } /* * We are probably in the middle of iscsi recovery so let * that complete and handle the error. */ - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } conn = session->leadconn; if (!conn) { /* In the middle of shuting down */ - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } @@ -2151,7 +2151,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) "Last data xfer at %lu. 
Last timeout was at " "%lu\n.", task->last_xfer, task->last_timeout); task->have_checked_conn = false; - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } @@ -2162,7 +2162,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) * and can let the iscsi eh handle it */ if (iscsi_has_ping_timed_out(conn)) { - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } @@ -2200,7 +2200,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) task->last_xfer, running_task->last_xfer, task->last_timeout); spin_unlock(&session->back_lock); - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } } @@ -2216,14 +2216,14 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) */ if (READ_ONCE(conn->ping_task)) { task->have_checked_conn = true; - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; goto done; } /* Make sure there is a transport check done */ iscsi_send_nopout(conn, NULL); task->have_checked_conn = true; - rc = BLK_EH_RESET_TIMER; + rc = SCSI_EH_RESET_TIMER; done: spin_unlock_bh(&session->frwd_lock); @@ -2232,7 +2232,7 @@ done: task->last_timeout = jiffies; iscsi_put_task(task); } - ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? + ISCSI_DBG_EH(session, "return %s\n", rc == SCSI_EH_RESET_TIMER ? "timer reset" : "shutdown or nh"); return rc; } diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index d35c9296f738..1ccce706167a 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -101,7 +101,7 @@ static void sas_ata_task_done(struct sas_task *task) spin_lock_irqsave(ap->lock, flags); /* check if we lost the race with libata/sas_ata_post_internal() */ - if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) { + if (unlikely(ata_port_is_frozen(ap))) { spin_unlock_irqrestore(ap->lock, flags); if (qc->scsicmd) goto qc_already_gone; @@ -139,8 +139,8 @@ static void sas_ata_task_done(struct sas_task *task) qc->flags |= ATA_QCFLAG_FAILED; } - dev->sata_dev.fis[3] = 0x04; /* status err */ - dev->sata_dev.fis[2] = ATA_ERR; + dev->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */ + dev->sata_dev.fis[3] = ATA_ABORTED; /* tf error */ } } @@ -287,6 +287,31 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) return 1; } +int smp_ata_check_ready_type(struct ata_link *link) +{ + struct domain_device *dev = link->ap->private_data; + struct sas_phy *phy = sas_get_local_phy(dev); + struct domain_device *ex_dev = dev->parent; + enum sas_device_type type = SAS_PHY_UNUSED; + u8 sas_addr[SAS_ADDR_SIZE]; + int res; + + res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type); + sas_put_local_phy(phy); + if (res) + return res; + + switch (type) { + case SAS_SATA_PENDING: + return 0; + case SAS_END_DEVICE: + return 1; + default: + return -ENODEV; + } +} +EXPORT_SYMBOL_GPL(smp_ata_check_ready_type); + static int smp_ata_check_ready(struct ata_link *link) { int res; @@ -358,7 +383,7 @@ static int sas_ata_printk(const char *level, const struct domain_device *ddev, return r; } -int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline) +static int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline) { struct sata_device *sata_dev = &dev->sata_dev; int (*check_ready)(struct ata_link *link); @@ -380,7 +405,6 @@ int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline) return ret; } -EXPORT_SYMBOL_GPL(sas_ata_wait_after_reset); static int sas_ata_hard_reset(struct 
ata_link *link, unsigned int *class, unsigned long deadline) @@ -861,6 +885,21 @@ void sas_ata_wait_eh(struct domain_device *dev) ata_port_wait_eh(ap); } +void sas_ata_device_link_abort(struct domain_device *device, bool force_reset) +{ + struct ata_port *ap = device->sata_dev.ap; + struct ata_link *link = &ap->link; + + device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */ + device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */ + + link->eh_info.err_mask |= AC_ERR_DEV; + if (force_reset) + link->eh_info.action |= ATA_EH_RESET; + ata_link_abort(link); +} +EXPORT_SYMBOL_GPL(sas_ata_device_link_abort); + int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id) { struct sas_tmf_task tmf_task = {}; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 5ce251830104..a04cad620e93 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -738,9 +738,7 @@ static void sas_ex_get_linkrate(struct domain_device *parent, phy->phy_state == PHY_NOT_PRESENT) continue; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(child->sas_addr)) { - + if (sas_phy_match_dev_addr(child, phy)) { child->min_linkrate = min(parent->min_linkrate, phy->linkrate); child->max_linkrate = max(parent->max_linkrate, @@ -1007,13 +1005,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } /* Parent and domain coherency */ - if (!dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == - SAS_ADDR(dev->port->sas_addr))) { + if (!dev->parent && sas_phy_match_port_addr(dev->port, ex_phy)) { sas_add_parent_port(dev, phy_id); return 0; } - if (dev->parent && (SAS_ADDR(ex_phy->attached_sas_addr) == - SAS_ADDR(dev->parent->sas_addr))) { + if (dev->parent && sas_phy_match_dev_addr(dev->parent, ex_phy)) { sas_add_parent_port(dev, phy_id); if (ex_phy->routing_attr == TABLE_ROUTING) sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1); @@ -1312,7 +1308,7 @@ static int sas_check_parent_topology(struct domain_device *child) parent_phy->phy_state == PHY_NOT_PRESENT) continue; - if (SAS_ADDR(parent_phy->attached_sas_addr) != SAS_ADDR(child->sas_addr)) + if (!sas_phy_match_dev_addr(child, parent_phy)) continue; child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; @@ -1522,8 +1518,7 @@ static int sas_configure_parent(struct domain_device *parent, struct ex_phy *phy = &ex_parent->ex_phy[i]; if ((phy->routing_attr == TABLE_ROUTING) && - (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(child->sas_addr))) { + sas_phy_match_dev_addr(child, phy)) { res = sas_configure_phy(parent, i, sas_addr, include); if (res) return res; @@ -1693,8 +1688,8 @@ static int sas_get_phy_change_count(struct domain_device *dev, return res; } -static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, - u8 *sas_addr, enum sas_device_type *type) +int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, + u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_disc_resp *disc_resp; @@ -1858,8 +1853,7 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, if (last) { list_for_each_entry_safe(child, n, &ex_dev->children, siblings) { - if (SAS_ADDR(child->sas_addr) == - SAS_ADDR(phy->attached_sas_addr)) { + if (sas_phy_match_dev_addr(child, phy)) { set_bit(SAS_DEV_GONE, &child->state); if (dev_is_expander(child->dev_type)) sas_unregister_ex_tree(parent->port, child); @@ -1941,8 +1935,7 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) if (res) return res; 
list_for_each_entry(child, &dev->ex_dev.children, siblings) { - if (SAS_ADDR(child->sas_addr) == - SAS_ADDR(ex_phy->attached_sas_addr)) { + if (sas_phy_match_dev_addr(child, ex_phy)) { if (dev_is_expander(child->dev_type)) res = sas_discover_bfs_by_root(child); break; @@ -2064,8 +2057,7 @@ static int sas_rediscover(struct domain_device *dev, const int phy_id) if (i == phy_id) continue; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(changed_phy->attached_sas_addr)) { + if (sas_phy_addr_match(phy, changed_phy)) { last = false; break; } @@ -2107,6 +2099,22 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev) return res; } +int sas_find_attached_phy_id(struct expander_device *ex_dev, + struct domain_device *dev) +{ + struct ex_phy *phy; + int phy_id; + + for (phy_id = 0; phy_id < ex_dev->num_phys; phy_id++) { + phy = &ex_dev->ex_phy[phy_id]; + if (sas_phy_match_dev_addr(dev, phy)) + return phy_id; + } + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(sas_find_attached_phy_id); + void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index e4f77072a58d..f2c05ebeb72f 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -35,7 +35,6 @@ struct sas_task *sas_alloc_task(gfp_t flags) return task; } -EXPORT_SYMBOL_GPL(sas_alloc_task); struct sas_task *sas_alloc_slow_task(gfp_t flags) { @@ -56,7 +55,6 @@ struct sas_task *sas_alloc_slow_task(gfp_t flags) return task; } -EXPORT_SYMBOL_GPL(sas_alloc_slow_task); void sas_free_task(struct sas_task *task) { @@ -65,7 +63,6 @@ void sas_free_task(struct sas_task *task) kmem_cache_free(sas_task_cache, task); } } -EXPORT_SYMBOL_GPL(sas_free_task); /*------------ SAS addr hash -----------*/ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 8d0ad3abc7b5..6f593fa69b58 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -52,6 +52,10 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha); struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags); void sas_free_event(struct asd_sas_event *event); +struct sas_task *sas_alloc_task(gfp_t flags); +struct sas_task *sas_alloc_slow_task(gfp_t flags); +void sas_free_task(struct sas_task *task); + int sas_register_ports(struct sas_ha_struct *sas_ha); void sas_unregister_ports(struct sas_ha_struct *sas_ha); @@ -84,6 +88,8 @@ struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id); int sas_ex_phy_discover(struct domain_device *dev, int single); int sas_get_report_phy_sata(struct domain_device *dev, int phy_id, struct smp_rps_resp *rps_resp); +int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, + u8 *sas_addr, enum sas_device_type *type); int sas_try_ata_reset(struct asd_sas_phy *phy); void sas_hae_reset(struct work_struct *work); @@ -111,6 +117,23 @@ static inline void sas_smp_host_handler(struct bsg_job *job, } #endif +static inline bool sas_phy_match_dev_addr(struct domain_device *dev, + struct ex_phy *phy) +{ + return SAS_ADDR(dev->sas_addr) == SAS_ADDR(phy->attached_sas_addr); +} + +static inline bool sas_phy_match_port_addr(struct asd_sas_port *port, + struct ex_phy *phy) +{ + return SAS_ADDR(port->sas_addr) == SAS_ADDR(phy->attached_sas_addr); +} + +static inline bool sas_phy_addr_match(struct ex_phy *p1, struct ex_phy *p2) +{ + return SAS_ADDR(p1->attached_sas_addr) == 
SAS_ADDR(p2->attached_sas_addr); +} + static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err) { pr_warn("%s: for %s device %016llx returned %d\n", diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index ef1481326fd7..77e1b2911cb4 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -134,7 +134,7 @@ lpfc_cmf_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "Congestion Mgmt Info: E2Eattr %d Ver %d " "CMF %d cnt %d\n", - phba->sli4_hba.pc_sli4_params.mi_ver, + phba->sli4_hba.pc_sli4_params.mi_cap, cp ? cp->cgn_info_version : 0, phba->sli4_hba.pc_sli4_params.cmf, phba->cmf_timer_cnt); @@ -1877,6 +1877,122 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) return 0; } +static ssize_t +lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + int rc; + int len = 0; + struct lpfc_rdp_context *rdp_context; + u16 temperature; + u16 rx_power; + u16 tx_bias; + u16 tx_power; + u16 vcc; + char chbuf[128]; + u16 wavelength = 0; + struct sff_trasnceiver_codes_byte7 *trasn_code_byte7; + + /* Get transceiver information */ + rdp_context = kmalloc(sizeof(*rdp_context), GFP_KERNEL); + + rc = lpfc_get_sfp_info_wait(phba, rdp_context); + if (rc) { + len = scnprintf(buf, PAGE_SIZE - len, "SFP info NA:\n"); + goto out_free_rdp; + } + + strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16); + chbuf[16] = 0; + + len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf); + len += scnprintf(buf + len, PAGE_SIZE - len, + "VendorOUI:\t%02x-%02x-%02x\n", + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI], + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 1], + (uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 2]); + strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16); + chbuf[16] = 0; + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf); + strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16); + chbuf[16] = 0; + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf); + strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4); + chbuf[4] = 0; + len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf); + strncpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8); + chbuf[8] = 0; + len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf); + len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_IDENTIFIER]); + len += scnprintf(buf + len, PAGE_SIZE - len, "ExtIdentifier:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_EXT_IDENTIFIER]); + len += scnprintf(buf + len, PAGE_SIZE - len, "Connector:\t%xh\n", + (uint8_t)rdp_context->page_a0[SSF_CONNECTOR]); + wavelength = (rdp_context->page_a0[SSF_WAVELENGTH_B1] << 8) | + rdp_context->page_a0[SSF_WAVELENGTH_B0]; + + len += scnprintf(buf + len, PAGE_SIZE - len, "Wavelength:\t%d nm\n", + wavelength); + trasn_code_byte7 = (struct sff_trasnceiver_codes_byte7 *) + &rdp_context->page_a0[SSF_TRANSCEIVER_CODE_B7]; + + len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t"); + if (*(uint8_t *)trasn_code_byte7 == 0) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "Unknown\n"); + } else { + if (trasn_code_byte7->fc_sp_100MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "1 "); + if (trasn_code_byte7->fc_sp_200mb) + len += scnprintf(buf + len, 
PAGE_SIZE - len, + "2 "); + if (trasn_code_byte7->fc_sp_400MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "4 "); + if (trasn_code_byte7->fc_sp_800MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "8 "); + if (trasn_code_byte7->fc_sp_1600MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "16 "); + if (trasn_code_byte7->fc_sp_3200MB) + len += scnprintf(buf + len, PAGE_SIZE - len, + "32 "); + if (trasn_code_byte7->speed_chk_ecc) + len += scnprintf(buf + len, PAGE_SIZE - len, + "64 "); + len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n"); + } + temperature = (rdp_context->page_a2[SFF_TEMPERATURE_B1] << 8 | + rdp_context->page_a2[SFF_TEMPERATURE_B0]); + vcc = (rdp_context->page_a2[SFF_VCC_B1] << 8 | + rdp_context->page_a2[SFF_VCC_B0]); + tx_power = (rdp_context->page_a2[SFF_TXPOWER_B1] << 8 | + rdp_context->page_a2[SFF_TXPOWER_B0]); + tx_bias = (rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B1] << 8 | + rdp_context->page_a2[SFF_TX_BIAS_CURRENT_B0]); + rx_power = (rdp_context->page_a2[SFF_RXPOWER_B1] << 8 | + rdp_context->page_a2[SFF_RXPOWER_B0]); + + len += scnprintf(buf + len, PAGE_SIZE - len, + "Temperature:\tx%04x C\n", temperature); + len += scnprintf(buf + len, PAGE_SIZE - len, "Vcc:\t\tx%04x V\n", vcc); + len += scnprintf(buf + len, PAGE_SIZE - len, + "TxBiasCurrent:\tx%04x mA\n", tx_bias); + len += scnprintf(buf + len, PAGE_SIZE - len, "TxPower:\tx%04x mW\n", + tx_power); + len += scnprintf(buf + len, PAGE_SIZE - len, "RxPower:\tx%04x mW\n", + rx_power); +out_free_rdp: + kfree(rdp_context); + return len; +} + /** * lpfc_board_mode_show - Return the state of the board * @dev: class device that is converted into a Scsi_host. @@ -2810,6 +2926,7 @@ static DEVICE_ATTR_RO(lpfc_drvr_version); static DEVICE_ATTR_RO(lpfc_enable_fip); static DEVICE_ATTR(board_mode, S_IRUGO | S_IWUSR, lpfc_board_mode_show, lpfc_board_mode_store); +static DEVICE_ATTR_RO(lpfc_xcvr_data); static DEVICE_ATTR(issue_reset, S_IWUSR, NULL, lpfc_issue_reset); static DEVICE_ATTR(max_vpi, S_IRUGO, lpfc_max_vpi_show, NULL); static DEVICE_ATTR(used_vpi, S_IRUGO, lpfc_used_vpi_show, NULL); @@ -5906,6 +6023,7 @@ static struct attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_fcp_wait_abts_rsp.attr, &dev_attr_nport_evt_cnt.attr, &dev_attr_board_mode.attr, + &dev_attr_lpfc_xcvr_data.attr, &dev_attr_max_vpi.attr, &dev_attr_used_vpi.attr, &dev_attr_max_rpi.attr, diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d2d207791056..8928f016d09e 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -687,3 +687,6 @@ int lpfc_issue_els_qfpa(struct lpfc_vport *vport); void lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); + +int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, + struct lpfc_rdp_context *rdp_context); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 863b2125fed6..919741bbe267 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -952,6 +952,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, uint16_t fcf_index; int rc; u32 ulp_status, ulp_word4, tmo; + bool flogi_in_retry = false; /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { @@ -1022,8 +1023,23 @@ stop_rr_fcf_flogi: phba->hba_flag, phba->fcf.fcf_flag); /* Check for retry */ - if (lpfc_els_retry(phba, cmdiocb, rspiocb)) + if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { + /* Address a timing race with dev_loss. 
If dev_loss + * is active on this FPort node, put the initial ref + * count back to stop premature node release actions. + */ + lpfc_check_nlp_post_devloss(vport, ndlp); + flogi_in_retry = true; goto out; + } + + /* The FLOGI will not be retried. If the FPort node is not + * registered with the SCSI transport, remove the initial + * reference to trigger node release. + */ + if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && + !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) + lpfc_nlp_put(ndlp); lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT, "0150 FLOGI failure Status:x%x/x%x " @@ -1086,7 +1102,7 @@ stop_rr_fcf_flogi: spin_unlock_irq(shost->host_lock); /* - * The FLogI succeeded. Sync the data for the CPU before + * The FLOGI succeeded. Sync the data for the CPU before * accessing it. */ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); @@ -1108,6 +1124,12 @@ stop_rr_fcf_flogi: vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA | LPFC_VMID_TYPE_PRIO); + /* + * Address a timing race with dev_loss. If dev_loss is active on + * this FPort node, put the initial ref count back to stop premature + * node release actions. + */ + lpfc_check_nlp_post_devloss(vport, ndlp); if (vport->port_state == LPFC_FLOGI) { /* * If Common Service Parameters indicate Nport @@ -1198,7 +1220,9 @@ flogifail: lpfc_issue_clear_la(phba, vport); } out: - phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; + if (!flogi_in_retry) + phba->hba_flag &= ~HBA_FLOGI_OUTSTANDING; + lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } @@ -1365,15 +1389,17 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 1; } + /* Avoid race with FLOGI completion and hba_flags. */ + phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); + rc = lpfc_issue_fabric_iocb(phba, elsiocb); if (rc == IOCB_ERROR) { + phba->hba_flag &= ~(HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); lpfc_els_free_iocb(phba, elsiocb); lpfc_nlp_put(ndlp); return 1; } - phba->hba_flag |= (HBA_FLOGI_ISSUED | HBA_FLOGI_OUTSTANDING); - /* Clear external loopback plug detected flag */ phba->link_flag &= ~LS_EXTERNAL_LOOPBACK; @@ -7190,6 +7216,134 @@ rdp_fail: return 1; } +int lpfc_get_sfp_info_wait(struct lpfc_hba *phba, + struct lpfc_rdp_context *rdp_context) +{ + LPFC_MBOXQ_t *mbox = NULL; + int rc; + struct lpfc_dmabuf *mp; + struct lpfc_dmabuf *mpsave; + void *virt; + MAILBOX_t *mb; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_ELS, + "7205 failed to allocate mailbox memory"); + return 1; + } + + if (lpfc_sli4_dump_page_a0(phba, mbox)) + goto sfp_fail; + mp = mbox->ctx_buf; + mpsave = mp; + virt = mp->virt; + if (phba->sli_rev < LPFC_SLI_REV4) { + mb = &mbox->u.mb; + mb->un.varDmp.cv = 1; + mb->un.varDmp.co = 1; + mb->un.varWords[2] = 0; + mb->un.varWords[3] = DMP_SFF_PAGE_A0_SIZE / 4; + mb->un.varWords[4] = 0; + mb->un.varWords[5] = 0; + mb->un.varWords[6] = 0; + mb->un.varWords[7] = 0; + mb->un.varWords[8] = 0; + mb->un.varWords[9] = 0; + mb->un.varWords[10] = 0; + mbox->in_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; + mbox->out_ext_byte_len = DMP_SFF_PAGE_A0_SIZE; + mbox->mbox_offset_word = 5; + mbox->ctx_buf = virt; + } else { + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A0_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + } + mbox->vport = phba->pport; + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + + rc = 
lpfc_sli_issue_mbox_wait(phba, mbox, 30); + if (rc == MBX_NOT_FINISHED) { + rc = 1; + goto error; + } + + if (phba->sli_rev == LPFC_SLI_REV4) + mp = (struct lpfc_dmabuf *)(mbox->ctx_buf); + else + mp = mpsave; + + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { + rc = 1; + goto error; + } + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a0, + DMP_SFF_PAGE_A0_SIZE); + + memset(mbox, 0, sizeof(*mbox)); + memset(mp->virt, 0, DMP_SFF_PAGE_A2_SIZE); + INIT_LIST_HEAD(&mp->list); + + /* save address for completion */ + mbox->ctx_buf = mp; + mbox->vport = phba->pport; + + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_DUMP_MEMORY); + bf_set(lpfc_mbx_memory_dump_type3_type, + &mbox->u.mqe.un.mem_dump_type3, DMP_LMSD); + bf_set(lpfc_mbx_memory_dump_type3_link, + &mbox->u.mqe.un.mem_dump_type3, phba->sli4_hba.physical_port); + bf_set(lpfc_mbx_memory_dump_type3_page_no, + &mbox->u.mqe.un.mem_dump_type3, DMP_PAGE_A2); + if (phba->sli_rev < LPFC_SLI_REV4) { + mb = &mbox->u.mb; + mb->un.varDmp.cv = 1; + mb->un.varDmp.co = 1; + mb->un.varWords[2] = 0; + mb->un.varWords[3] = DMP_SFF_PAGE_A2_SIZE / 4; + mb->un.varWords[4] = 0; + mb->un.varWords[5] = 0; + mb->un.varWords[6] = 0; + mb->un.varWords[7] = 0; + mb->un.varWords[8] = 0; + mb->un.varWords[9] = 0; + mb->un.varWords[10] = 0; + mbox->in_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; + mbox->out_ext_byte_len = DMP_SFF_PAGE_A2_SIZE; + mbox->mbox_offset_word = 5; + mbox->ctx_buf = virt; + } else { + bf_set(lpfc_mbx_memory_dump_type3_length, + &mbox->u.mqe.un.mem_dump_type3, DMP_SFF_PAGE_A2_SIZE); + mbox->u.mqe.un.mem_dump_type3.addr_lo = putPaddrLow(mp->phys); + mbox->u.mqe.un.mem_dump_type3.addr_hi = putPaddrHigh(mp->phys); + } + + mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context; + rc = lpfc_sli_issue_mbox_wait(phba, mbox, 30); + if (bf_get(lpfc_mqe_status, &mbox->u.mqe)) { + rc = 1; + goto error; + } + rc = 0; + + lpfc_sli_bemem_bcopy(mp->virt, &rdp_context->page_a2, + DMP_SFF_PAGE_A2_SIZE); + +error: + mbox->ctx_buf = mpsave; + lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); + + return rc; + +sfp_fail: + mempool_free(mbox, phba->mbox_mem_pool); + return 1; +} + /* * lpfc_els_rcv_rdp - Process an unsolicited RDP ELS. * @vport: pointer to a host virtual N_Port data structure. 
@@ -9044,15 +9198,10 @@ static int lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp) { - struct lpfc_dmabuf *pcmd; - uint32_t *lp; uint32_t did; did = get_job_els_rsp64_did(vport->phba, cmdiocb); - pcmd = cmdiocb->cmd_dmabuf; - lp = (uint32_t *)pcmd->virt; - lp++; /* FARP-RSP received from DID <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0600 FARP-RSP received from DID x%x\n", did); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index d38ebd7281b9..80375d73b732 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -426,10 +426,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) name = (uint8_t *)&ndlp->nlp_portname; phba = vport->phba; - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); - if (phba->sli_rev == LPFC_SLI_REV4) fcf_inuse = lpfc_fcf_inuse(phba); @@ -451,22 +447,36 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); + + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); return fcf_inuse; } /* Fabric nodes are done. */ if (ndlp->nlp_type & NLP_FABRIC) { spin_lock_irqsave(&ndlp->lock, iflags); - /* In massive vport configuration settings, it's possible - * dev_loss_tmo fired during node recovery. So, check if - * fabric nodes are in discovery states outstanding. + + /* In massive vport configuration settings or when the FLOGI + * completes with a sequence timeout, it's possible + * dev_loss_tmo fired during node recovery. The driver has to + * account for this race to allow for recovery and keep + * the reference counting correct. */ switch (ndlp->nlp_DID) { case Fabric_DID: fc_vport = vport->fc_vport; - if (fc_vport && - fc_vport->vport_state == FC_VPORT_INITIALIZING) - recovering = true; + if (fc_vport) { + /* NPIV path. */ + if (fc_vport->vport_state == + FC_VPORT_INITIALIZING) + recovering = true; + } else { + /* Physical port path. 
*/ + if (phba->hba_flag & HBA_FLOGI_OUTSTANDING) + recovering = true; + } break; case Fabric_Cntl_DID: if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) @@ -514,6 +524,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) return fcf_inuse; } + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_nlp_put(ndlp); return fcf_inuse; } @@ -552,6 +565,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) return fcf_inuse; } + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 5288fc69908a..fb3504dbb899 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -3162,7 +3162,8 @@ struct lpfc_mbx_memory_dump_type3 { #define SFF_LENGTH_COPPER 18 #define SSF_LENGTH_50UM_OM3 19 #define SSF_VENDOR_NAME 20 -#define SSF_VENDOR_OUI 36 +#define SSF_TRANSCEIVER2 36 +#define SSF_VENDOR_OUI 37 #define SSF_VENDOR_PN 40 #define SSF_VENDOR_REV 56 #define SSF_WAVELENGTH_B1 60 @@ -3281,7 +3282,7 @@ struct sff_trasnceiver_codes_byte6 { struct sff_trasnceiver_codes_byte7 { uint8_t fc_sp_100MB:1; /* 100 MB/sec */ - uint8_t reserve:1; + uint8_t speed_chk_ecc:1; uint8_t fc_sp_200mb:1; /* 200 MB/sec */ uint8_t fc_sp_3200MB:1; /* 3200 MB/sec */ uint8_t fc_sp_400MB:1; /* 400 MB/sec */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b535f1fd3010..25ba20e42825 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -36,7 +36,6 @@ #include <linux/firmware.h> #include <linux/miscdevice.h> #include <linux/percpu.h> -#include <linux/msi.h> #include <linux/irq.h> #include <linux/bitops.h> #include <linux/crash_dump.h> @@ -699,6 +698,8 @@ lpfc_sli4_refresh_params(struct lpfc_hba *phba) return rc; } mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + phba->sli4_hba.pc_sli4_params.mi_cap = + bf_get(cfg_mi_ver, mbx_sli4_parameters); /* Are we forcing MI off via module parameter? */ if (phba->cfg_enable_mi) @@ -10093,17 +10094,15 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) qmin = phba->sli4_hba.max_cfg_param.max_wq; if (phba->sli4_hba.max_cfg_param.max_cq < qmin) qmin = phba->sli4_hba.max_cfg_param.max_cq; - if (phba->sli4_hba.max_cfg_param.max_eq < qmin) - qmin = phba->sli4_hba.max_cfg_param.max_eq; /* - * Whats left after this can go toward NVME / FCP. - * The minus 4 accounts for ELS, NVME LS, MBOX - * plus one extra. When configured for - * NVMET, FCP io channel WQs are not created. + * Reserve 4 (ELS, NVME LS, MBOX, plus one extra) and + * the remainder can be used for NVME / FCP. 
*/ qmin -= 4; + if (phba->sli4_hba.max_cfg_param.max_eq < qmin) + qmin = phba->sli4_hba.max_cfg_param.max_eq; - /* Check to see if there is enough for NVME */ + /* Check to see if there is enough for default cfg */ if ((phba->cfg_irq_chann > qmin) || (phba->cfg_hdw_queue > qmin)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -13842,6 +13841,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) mbx_sli4_parameters); phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); + sli4_params->mi_cap = bf_get(cfg_mi_ver, mbx_sli4_parameters); /* Check for Extended Pre-Registered SGL support */ phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 99d06dc7ddf6..182aaae60386 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1373,7 +1373,6 @@ static void __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { struct lpfc_sglq *sglq; - size_t start_clean = offsetof(struct lpfc_iocbq, wqe); unsigned long iflag = 0; struct lpfc_sli_ring *pring; @@ -1430,7 +1429,7 @@ out: /* * Clean all volatile data fields, preserve iotag and node struct. */ - memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); + memset_startat(iocbq, 0, wqe); iocbq->sli4_lxritag = NO_XRI; iocbq->sli4_xritag = NO_XRI; iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF | @@ -1453,12 +1452,11 @@ out: static void __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { - size_t start_clean = offsetof(struct lpfc_iocbq, iocb); /* * Clean all volatile data fields, preserve iotag and node struct. */ - memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); + memset_startat(iocbq, 0, iocb); iocbq->sli4_xritag = NO_XRI; list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } @@ -1848,6 +1846,24 @@ lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, phba->cmf_link_byte_count); bwpcent = div64_u64(bw * 100 + slop, phba->cmf_link_byte_count); + /* Because of bytes adjustment due to shorter timer in + * lpfc_cmf_timer() the cmf_link_byte_count can be shorter and + * may seem like BW is above 100%. 
+ */ + if (bwpcent > 100) + bwpcent = 100; + + if (phba->cmf_max_bytes_per_interval < bw && + bwpcent > 95) + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6208 Congestion bandwidth " + "limits removed\n"); + else if ((phba->cmf_max_bytes_per_interval > bw) && + ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95)) + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "6209 Congestion bandwidth " + "limits in effect\n"); + if (asig) { lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, "6237 BW Threshold %lld%% (%lld): " @@ -8150,10 +8166,10 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, "IO_cnt", "Info", "BWutil(ms)"); } - /* Needs to be _bh because record is called from timer interrupt + /* Needs to be _irq because record is called from timer interrupt * context */ - spin_lock_bh(ring_lock); + spin_lock_irq(ring_lock); while (*head_idx != *tail_idx) { entry = &ring[*head_idx]; @@ -8197,7 +8213,7 @@ u32 lpfc_rx_monitor_report(struct lpfc_hba *phba, if (cnt >= max_read_entries) break; } - spin_unlock_bh(ring_lock); + spin_unlock_irq(ring_lock); return cnt; } @@ -8354,6 +8370,7 @@ no_cmf: phba->cgn_i = NULL; /* Ensure CGN Mode is off */ phba->cmf_active_mode = LPFC_CFG_OFF; + sli4_params->cmf = 0; return 0; } } diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index cbb1aa1cf025..f927c2a25d54 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -556,6 +556,7 @@ struct lpfc_pc_sli4_params { #define LPFC_MIB3_SUPPORT 3 uint16_t mi_value; #define LPFC_DFLT_MIB_VAL 2 + uint8_t mi_cap; uint8_t mib_bde_cnt; uint8_t cmf; uint8_t cqv; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 192d5630a44d..41a1128f8651 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.7" +#define LPFC_DRIVER_VERSION "14.2.0.9" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d265a2d9d082..3ceece988338 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2927,15 +2927,14 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd) * Sets the FW busy flag and reduces the host->can_queue if the * cmd has not been completed within the timeout period. 
*/ -static enum -blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) +static enum scsi_timeout_action megasas_reset_timer(struct scsi_cmnd *scmd) { struct megasas_instance *instance; unsigned long flags; if (time_after(jiffies, scmd->jiffies_at_alloc + (scmd_timeout * 2) * HZ)) { - return BLK_EH_DONE; + return SCSI_EH_NOT_HANDLED; } instance = (struct megasas_instance *)scmd->device->host->hostdata; @@ -2949,7 +2948,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) spin_unlock_irqrestore(instance->host->host_lock, flags); } - return BLK_EH_RESET_TIMER; + return SCSI_EH_RESET_TIMER; } /** diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 6650f8c8e9b0..fe70f8f11435 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -80,7 +80,7 @@ static void megasas_fusion_crash_dump(struct megasas_instance *instance); * @ocr_context: If called from OCR context this will * be set to 1, else 0 * - * This function initates a chip reset followed by a wait for controller to + * This function initiates a chip reset followed by a wait for controller to * transition to ready state. * During this, driver will block all access to PCI config space from userspace */ @@ -334,7 +334,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, * * This function is only for fusion controllers. * Update host can queue, if firmware downgrade max supported firmware commands. - * Firmware upgrade case will be skiped because underlying firmware has + * Firmware upgrade case will be skipped because underlying firmware has * more resource than exposed to the OS. * */ @@ -2588,7 +2588,7 @@ static void megasas_stream_detect(struct megasas_instance *instance, if ((io_info->ldStartBlock != current_sd->next_seq_lba) && ((!io_info->isRead) || (!is_read_ahead))) /* - * Once the API availible we need to change this. + * Once the API is available we need to change this. 
* At this point we are not allowing any gap */ continue; @@ -4650,7 +4650,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, } /* - * megasas_fusion_smid_lookup : Look for fusion command correpspodning to SCSI + * megasas_fusion_smid_lookup : Look for fusion command corresponding to SCSI * @instance: per adapter struct * * Return Non Zero index, if SMID found in outstanding commands diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 0681daee6c14..e5ecd6ada6cd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -829,6 +829,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, if ((sas_rphy_add(rphy))) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); + sas_rphy_free(rphy); + rphy = NULL; } if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h index 7123a2efbf58..8ef174cd4d37 100644 --- a/drivers/scsi/mvsas/mv_defs.h +++ b/drivers/scsi/mvsas/mv_defs.h @@ -40,6 +40,7 @@ enum driver_configuration { MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ MVS_OAF_SZ = 64, /* Open address frame buffer size */ MVS_QUEUE_SIZE = 64, /* Support Queue depth */ + MVS_RSVD_SLOTS = 4, MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, }; diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 2fde496fff5f..cfe84473a515 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -142,7 +142,7 @@ static void mvs_free(struct mvs_info *mvi) scsi_host_put(mvi->shost); list_for_each_entry(mwq, &mvi->wq_list, entry) cancel_delayed_work(&mwq->work_q); - kfree(mvi->tags); + kfree(mvi->rsvd_tags); kfree(mvi); } @@ -284,10 +284,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name); goto err_out; } - mvi->tags_num = slot_nr; - /* Initialize tags */ - mvs_tag_init(mvi); return 0; err_out: return 1; @@ -369,8 +366,8 @@ static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev, mvi->sas = sha; mvi->shost = shost; - mvi->tags = kzalloc(MVS_CHIP_SLOT_SZ>>3, GFP_KERNEL); - if (!mvi->tags) + mvi->rsvd_tags = bitmap_zalloc(MVS_RSVD_SLOTS, GFP_KERNEL); + if (!mvi->rsvd_tags) goto err_out; if (MVS_CHIP_DISP->chip_ioremap(mvi)) @@ -471,6 +468,8 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost, else can_queue = MVS_CHIP_SLOT_SZ; + can_queue -= MVS_RSVD_SLOTS; + shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); shost->can_queue = can_queue; mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index a6867dae0e7c..9978c424214c 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -20,44 +20,40 @@ static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) return 0; } -void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) { - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags; clear_bit(tag, bitmap); } -void mvs_tag_free(struct mvs_info *mvi, u32 tag) +static void mvs_tag_free(struct mvs_info *mvi, u32 tag) { + if (tag >= MVS_RSVD_SLOTS) + return; + mvs_tag_clear(mvi, tag); } -void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) { - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags; set_bit(tag, bitmap); } 
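The mvsas changes in this and the following hunks replace the full per-slot tag bitmap with a small reserved pool: regular I/O now derives its hardware slot from the block layer request tag (offset past the reserved region), and only internal commands without a struct request allocate from the MVS_RSVD_SLOTS bitmap. Below is a minimal sketch of that selection, using the helpers shown in the surrounding hunks; the wrapper function name is hypothetical.

/* Illustrative only; mirrors the tag selection added to mvs_task_prep(). */
static int mvs_slot_for_task_sketch(struct mvs_info *mvi,
				    struct sas_task *task, u32 *slot)
{
	struct request *rq = sas_task_find_rq(task);

	if (rq) {
		/* Regular I/O: reuse the block layer tag, shifted past
		 * the MVS_RSVD_SLOTS reserved region.
		 */
		*slot = rq->tag + MVS_RSVD_SLOTS;
		return 0;
	}

	/* Internal commands: take a bit from the small reserved bitmap. */
	return mvs_tag_alloc(mvi, slot);
}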
-inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) { unsigned int index, tag; - void *bitmap = mvi->tags; + void *bitmap = mvi->rsvd_tags; - index = find_first_zero_bit(bitmap, mvi->tags_num); + index = find_first_zero_bit(bitmap, MVS_RSVD_SLOTS); tag = index; - if (tag >= mvi->tags_num) + if (tag >= MVS_RSVD_SLOTS) return -SAS_QUEUE_FULL; mvs_tag_set(mvi, tag); *tag_out = tag; return 0; } -void mvs_tag_init(struct mvs_info *mvi) -{ - int i; - for (i = 0; i < mvi->tags_num; ++i) - mvs_tag_clear(mvi, i); -} - static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) { unsigned long i = 0, j = 0, hi = 0; @@ -703,6 +699,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf struct mvs_task_exec_info tei; struct mvs_slot_info *slot; u32 tag = 0xdeadbeef, n_elem = 0; + struct request *rq; int rc = 0; if (!dev->port) { @@ -767,9 +764,14 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf n_elem = task->num_scatter; } - rc = mvs_tag_alloc(mvi, &tag); - if (rc) - goto err_out; + rq = sas_task_find_rq(task); + if (rq) { + tag = rq->tag + MVS_RSVD_SLOTS; + } else { + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + } slot = &mvi->slot_info[tag]; @@ -864,7 +866,7 @@ int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags) static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) { u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - mvs_tag_clear(mvi, slot_idx); + mvs_tag_free(mvi, slot_idx); } static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, @@ -1190,23 +1192,16 @@ static int mvs_dev_found_notify(struct domain_device *dev, int lock) mvi_device->sas_device = dev; if (parent_dev && dev_is_expander(parent_dev->dev_type)) { int phy_id; - u8 phy_num = parent_dev->ex_dev.num_phys; - struct ex_phy *phy; - for (phy_id = 0; phy_id < phy_num; phy_id++) { - phy = &parent_dev->ex_dev.ex_phy[phy_id]; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(dev->sas_addr)) { - mvi_device->attached_phy = phy_id; - break; - } - } - if (phy_id == phy_num) { + phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev); + if (phy_id < 0) { mv_printk("Error: no attached dev:%016llx" "at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(parent_dev->sas_addr)); - res = -1; + res = phy_id; + } else { + mvi_device->attached_phy = phy_id; } } diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 509d8f32a04f..68df771e2975 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -370,8 +370,7 @@ struct mvs_info { u32 chip_id; const struct mvs_chip_info *chip; - int tags_num; - unsigned long *tags; + unsigned long *rsvd_tags; /* further per-slot information */ struct mvs_phy phy[MVS_MAX_PHYS]; struct mvs_port port[MVS_MAX_PHYS]; @@ -424,11 +423,6 @@ struct mvs_task_exec_info { /******************** function prototype *********************/ void mvs_get_sas_addr(void *buf, u32 buflen); -void mvs_tag_clear(struct mvs_info *mvi, u32 tag); -void mvs_tag_free(struct mvs_info *mvi, u32 tag); -void mvs_tag_set(struct mvs_info *mvi, unsigned int tag); -int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out); -void mvs_tag_init(struct mvs_info *mvi); void mvs_iounmap(void __iomem *regs); int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard); diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 05d3ce9b72db..b3dcb8918618 100644 --- 
a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -2109,7 +2109,7 @@ out_return_cmd: return 0; } -static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) +static enum scsi_timeout_action mvumi_timed_out(struct scsi_cmnd *scmd) { struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv; struct Scsi_Host *host = scmd->device->host; @@ -2137,7 +2137,7 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) mvumi_return_cmd(mhba, cmd); spin_unlock_irqrestore(mhba->shost->host_lock, flags); - return BLK_EH_DONE; + return SCSI_EH_NOT_HANDLED; } static int diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 48acab03a8a0..a5a1406a2bde 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -450,8 +450,6 @@ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt) sync_data *sync = &(data->Sync[target]); struct nsp_sync_table *sync_table; unsigned int period, offset; - int i; - nsp_dbg(NSP_DEBUG_SYNC, "in"); @@ -466,7 +464,7 @@ static int nsp_analyze_sdtr(struct scsi_cmnd *SCpnt) sync_table = nsp_sync_table_40M; } - for ( i = 0; sync_table->max_period != 0; i++, sync_table++) { + for (; sync_table->max_period != 0; sync_table++) { if ( period >= sync_table->min_period && period <= sync_table->max_period ) { break; diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 628b08ba6770..ec1a9ab61814 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1724,7 +1724,14 @@ void pm8001_work_fn(struct work_struct *work) pm8001_free_dev(pm8001_dev); } } - } break; + } + break; + case IO_XFER_ERROR_ABORTED_NCQ_MODE: + { + dev = pm8001_dev->sas_device; + sas_ata_device_link_abort(dev, false); + } + break; } kfree(pw); } @@ -1748,110 +1755,6 @@ int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, return ret; } -static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, - struct pm8001_device *pm8001_ha_dev) -{ - struct pm8001_ccb_info *ccb; - struct sas_task *task; - struct task_abort_req task_abort; - u32 opc = OPC_INB_SATA_ABORT; - int ret; - - pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG; - pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG; - - task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { - pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n"); - return; - } - - task->task_done = pm8001_task_done; - - ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task); - if (!ccb) { - sas_free_task(task); - return; - } - - memset(&task_abort, 0, sizeof(task_abort)); - task_abort.abort_all = cpu_to_le32(1); - task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - task_abort.tag = cpu_to_le32(ccb->ccb_tag); - - ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort, - sizeof(task_abort), 0); - if (ret) { - sas_free_task(task); - pm8001_ccb_free(pm8001_ha, ccb); - } -} - -static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, - struct pm8001_device *pm8001_ha_dev) -{ - struct sata_start_req sata_cmd; - int res; - struct pm8001_ccb_info *ccb; - struct sas_task *task = NULL; - struct host_to_dev_fis fis; - struct domain_device *dev; - u32 opc = OPC_INB_SATA_HOST_OPSTART; - - task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { - pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n"); - return; - } - task->task_done = pm8001_task_done; - - /* - * Allocate domain device by ourselves as libsas is not going to - * provide any. 
- */ - dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); - if (!dev) { - sas_free_task(task); - pm8001_dbg(pm8001_ha, FAIL, - "Domain device cannot be allocated\n"); - return; - } - task->dev = dev; - task->dev->lldd_dev = pm8001_ha_dev; - - ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task); - if (!ccb) { - sas_free_task(task); - kfree(dev); - return; - } - - pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; - pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; - - /* construct read log FIS */ - memset(&fis, 0, sizeof(struct host_to_dev_fis)); - fis.fis_type = 0x27; - fis.flags = 0x80; - fis.command = ATA_CMD_READ_LOG_EXT; - fis.lbal = 0x10; - fis.sector_count = 0x1; - - memset(&sata_cmd, 0, sizeof(sata_cmd)); - sata_cmd.tag = cpu_to_le32(ccb->ccb_tag); - sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m = cpu_to_le32((0x1 << 7) | (0x5 << 9)); - memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); - - res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd, - sizeof(sata_cmd), 0); - if (res) { - sas_free_task(task); - pm8001_ccb_free(pm8001_ha, ccb); - kfree(dev); - } -} - /** * mpi_ssp_completion- process the event that FW response to the SSP request. * @pm8001_ha: our hba card information @@ -2295,12 +2198,13 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (t->dev && (t->dev->lldd_dev)) pm8001_dev = t->dev->lldd_dev; } else { - pm8001_dbg(pm8001_ha, FAIL, "task null\n"); + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); return; } - if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) - && unlikely(!t || !t->lldd_task || !t->dev)) { + if (pm8001_dev && unlikely(!t || !t->lldd_task || !t->dev)) { pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); return; } @@ -2358,15 +2262,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_SAM_STAT_GOOD; - /* check if response is for SEND READ LOG */ - if (pm8001_dev && - (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { - pm8001_send_abort_all(pm8001_ha, pm8001_dev); - /* Free the tag */ - pm8001_tag_free(pm8001_ha, tag); - sas_free_task(t); - return; - } } else { u8 len; ts->resp = SAS_TASK_COMPLETE; @@ -2664,9 +2559,10 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { /* find device using device id */ pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); - /* send read log extension */ if (pm8001_dev) - pm8001_send_read_log(pm8001_ha, pm8001_dev); + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_XFER_ERROR_ABORTED_NCQ_MODE); return; } @@ -2675,8 +2571,17 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_dev = ccb->device; if (event) pm8001_dbg(pm8001_ha, FAIL, "sata IO status 0x%x\n", event); - if (unlikely(!t || !t->lldd_task || !t->dev)) + + if (unlikely(!t)) { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); return; + } + + if (unlikely(!t->lldd_task || !t->dev)) + return; + ts = &t->task_status; pm8001_dbg(pm8001_ha, DEVIO, "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", @@ -3638,12 +3543,7 @@ int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_task_free(pm8001_ha, ccb); mb(); - if (pm8001_dev->id & NCQ_ABORT_ALL_FLAG) { - sas_free_task(t); - pm8001_dev->id &= ~NCQ_ABORT_ALL_FLAG; - } else { - t->task_done(t); - } + 
t->task_done(t); return 0; } @@ -4195,7 +4095,6 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u64 phys_addr; u32 ATAP = 0x0; u32 dir; - unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); @@ -4250,39 +4149,6 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.esgl = 0; } - /* Check for read log for failed drive and return */ - if (sata_cmd.sata_fis.command == 0x2f) { - if (((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || - (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || - (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { - struct task_status_struct *ts; - - pm8001_ha_dev->id &= 0xDFFFFFFF; - ts = &task->task_status; - - spin_lock_irqsave(&task->task_state_lock, flags); - ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAS_SAM_STAT_GOOD; - task->task_state_flags &= ~SAS_TASK_STATE_PENDING; - task->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((task->task_state_flags & - SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&task->task_state_lock, - flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n", - task, ts->resp, - ts->stat); - pm8001_ccb_task_free(pm8001_ha, ccb); - } else { - spin_unlock_irqrestore(&task->task_state_lock, - flags); - pm8001_ccb_task_free_done(pm8001_ha, ccb); - return 0; - } - } - } - return pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd, sizeof(sata_cmd), 0); } diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 7a7d63aa90e2..7e589fe3e010 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -197,7 +197,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); flush_workqueue(pm8001_wq); - bitmap_free(pm8001_ha->tags); + bitmap_free(pm8001_ha->rsvd_tags); kfree(pm8001_ha); } @@ -437,8 +437,6 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, atomic_set(&pm8001_ha->devices[i].running_req, 0); } pm8001_ha->flags = PM8001F_INIT_TIME; - /* Initialize tags */ - pm8001_tag_init(pm8001_ha); return 0; err_out_nodev: @@ -1211,18 +1209,15 @@ static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha) struct Scsi_Host *shost = pm8001_ha->shost; struct device *dev = pm8001_ha->dev; u32 max_out_io, ccb_count; - u32 can_queue; int i; max_out_io = pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io; ccb_count = min_t(int, PM8001_MAX_CCB, max_out_io); - /* Update to the scsi host*/ - can_queue = ccb_count - PM8001_RESERVE_SLOT; - shost->can_queue = can_queue; + shost->can_queue = ccb_count - PM8001_RESERVE_SLOT; - pm8001_ha->tags = bitmap_zalloc(ccb_count, GFP_KERNEL); - if (!pm8001_ha->tags) + pm8001_ha->rsvd_tags = bitmap_zalloc(PM8001_RESERVE_SLOT, GFP_KERNEL); + if (!pm8001_ha->rsvd_tags) goto err_out; /* Memory region for ccb_info*/ @@ -1247,7 +1242,6 @@ static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha) pm8001_ha->ccb_info[i].task = NULL; pm8001_ha->ccb_info[i].ccb_tag = PM8001_INVALID_TAG; pm8001_ha->ccb_info[i].device = NULL; - ++pm8001_ha->tags_num; } return 0; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 8e3f2f9ddaac..e5673c774f66 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -65,9 +65,12 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag) */ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { - void *bitmap = pm8001_ha->tags; + void *bitmap = pm8001_ha->rsvd_tags; unsigned long 
flags; + if (tag >= PM8001_RESERVE_SLOT) + return; + spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); __clear_bit(tag, bitmap); spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); @@ -80,29 +83,24 @@ void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) */ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out) { - void *bitmap = pm8001_ha->tags; + void *bitmap = pm8001_ha->rsvd_tags; unsigned long flags; unsigned int tag; spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags); - tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num); - if (tag >= pm8001_ha->tags_num) { + tag = find_first_zero_bit(bitmap, PM8001_RESERVE_SLOT); + if (tag >= PM8001_RESERVE_SLOT) { spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); return -SAS_QUEUE_FULL; } __set_bit(tag, bitmap); spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags); + + /* reserved tags are in the lower region of the tagset */ *tag_out = tag; return 0; } -void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) -{ - int i; - for (i = 0; i < pm8001_ha->tags_num; ++i) - pm8001_tag_free(pm8001_ha, i); -} - /** * pm8001_mem_alloc - allocate memory for pm8001. * @pdev: pci device. @@ -645,22 +643,16 @@ static int pm8001_dev_found_notify(struct domain_device *dev) pm8001_device->dcompletion = &completion; if (parent_dev && dev_is_expander(parent_dev->dev_type)) { int phy_id; - struct ex_phy *phy; - for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys; - phy_id++) { - phy = &parent_dev->ex_dev.ex_phy[phy_id]; - if (SAS_ADDR(phy->attached_sas_addr) - == SAS_ADDR(dev->sas_addr)) { - pm8001_device->attached_phy = phy_id; - break; - } - } - if (phy_id == parent_dev->ex_dev.num_phys) { + + phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev); + if (phy_id < 0) { pm8001_dbg(pm8001_ha, FAIL, "Error: no attached dev:%016llx at ex:%016llx.\n", SAS_ADDR(dev->sas_addr), SAS_ADDR(parent_dev->sas_addr)); - res = -1; + res = phy_id; + } else { + pm8001_device->attached_phy = phy_id; } } else { if (dev->dev_type == SAS_SATA_DEV) { @@ -687,12 +679,6 @@ int pm8001_dev_found(struct domain_device *dev) return pm8001_dev_found_notify(dev); } -void pm8001_task_done(struct sas_task *task) -{ - del_timer(&task->slow_task->timer); - complete(&task->slow_task->completion); -} - #define PM8001_TASK_TIMEOUT 20 /** @@ -983,6 +969,7 @@ int pm8001_query_task(struct sas_task *task) /* mandatory SAM-3, still need free task/ccb info, abort the specified task */ int pm8001_abort_task(struct sas_task *task) { + struct pm8001_ccb_info *ccb = task->lldd_task; unsigned long flags; u32 tag; struct domain_device *dev ; @@ -992,7 +979,7 @@ int pm8001_abort_task(struct sas_task *task) u32 phy_id, port_id; struct sas_task_slow slow_task; - if (unlikely(!task || !task->lldd_task || !task->dev)) + if (!task->lldd_task || !task->dev) return TMF_RESP_FUNC_FAILED; dev = task->dev; @@ -1113,6 +1100,13 @@ int pm8001_abort_task(struct sas_task *task) pm8001_dev, DS_OPERATIONAL); wait_for_completion(&completion); } else { + /* + * Ensure that if we see a completion for the ccb + * associated with the task which we are trying to + * abort then we should not touch the sas_task as it + * may race with libsas freeing it when return here. 
+ */ + ccb->task = NULL; ret = sas_execute_internal_abort_single(dev, tag, 0, NULL); } rc = TMF_RESP_FUNC_COMPLETE; diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index b08f52673889..dc1f4d958e03 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -510,8 +510,7 @@ struct pm8001_hba_info { u32 chip_id; const struct pm8001_chip_info *chip; struct completion *nvmd_completion; - int tags_num; - unsigned long *tags; + unsigned long *rsvd_tags; struct pm8001_phy phy[PM8001_MAX_PHYS]; struct pm8001_port port[PM8001_MAX_PHYS]; u32 id; @@ -535,7 +534,6 @@ struct pm8001_hba_info { bool controller_fatal_error; const struct firmware *fw_image; struct isr_param irq_vector[PM8001_MAX_MSIX_VEC]; - u32 reset_in_progress; u32 non_fatal_count; u32 non_fatal_read_length; u32 max_q_num; @@ -579,10 +577,6 @@ struct pm8001_fw_image_header { #define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 #define FLASH_UPDATE_DISABLED 0x11 -#define NCQ_READ_LOG_FLAG 0x80000000 -#define NCQ_ABORT_ALL_FLAG 0x40000000 -#define NCQ_2ND_RLE_FLAG 0x20000000 - /* Device states */ #define DS_OPERATIONAL 0x01 #define DS_PORT_IN_RESET 0x02 @@ -636,7 +630,6 @@ extern struct workqueue_struct *pm8001_wq; /******************** function prototype *********************/ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); -void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha); u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag); void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb); @@ -709,7 +702,6 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb); int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); struct sas_task *pm8001_alloc_task(void); -void pm8001_task_done(struct sas_task *task); void pm8001_free_task(struct sas_task *task); void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, @@ -742,9 +734,15 @@ pm8001_ccb_alloc(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *dev, struct sas_task *task) { struct pm8001_ccb_info *ccb; + struct request *rq = NULL; u32 tag; - if (pm8001_tag_alloc(pm8001_ha, &tag)) { + if (task) + rq = sas_task_find_rq(task); + + if (rq) { + tag = rq->tag + PM8001_RESERVE_SLOT; + } else if (pm8001_tag_alloc(pm8001_ha, &tag)) { pm8001_dbg(pm8001_ha, FAIL, "Failed to allocate a tag\n"); return NULL; } diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index f8b8624458f7..9584cadc4201 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -1778,113 +1778,6 @@ pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) pm80xx_chip_intx_interrupt_disable(pm8001_ha); } -static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, - struct pm8001_device *pm8001_ha_dev) -{ - struct pm8001_ccb_info *ccb; - struct sas_task *task; - struct task_abort_req task_abort; - u32 opc = OPC_INB_SATA_ABORT; - int ret; - - pm8001_ha_dev->id |= NCQ_ABORT_ALL_FLAG; - pm8001_ha_dev->id &= ~NCQ_READ_LOG_FLAG; - - task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { - pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task\n"); - return; - } - task->task_done = pm8001_task_done; - - ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task); - if (!ccb) { - sas_free_task(task); - return; - } - - memset(&task_abort, 0, 
sizeof(task_abort)); - task_abort.abort_all = cpu_to_le32(1); - task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - task_abort.tag = cpu_to_le32(ccb->ccb_tag); - - ret = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &task_abort, - sizeof(task_abort), 0); - pm8001_dbg(pm8001_ha, FAIL, "Executing abort task end\n"); - if (ret) { - sas_free_task(task); - pm8001_ccb_free(pm8001_ha, ccb); - } -} - -static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, - struct pm8001_device *pm8001_ha_dev) -{ - struct sata_start_req sata_cmd; - int res; - struct pm8001_ccb_info *ccb; - struct sas_task *task = NULL; - struct host_to_dev_fis fis; - struct domain_device *dev; - u32 opc = OPC_INB_SATA_HOST_OPSTART; - - task = sas_alloc_slow_task(GFP_ATOMIC); - if (!task) { - pm8001_dbg(pm8001_ha, FAIL, "cannot allocate task !!!\n"); - return; - } - task->task_done = pm8001_task_done; - - /* - * Allocate domain device by ourselves as libsas is not going to - * provide any. - */ - dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); - if (!dev) { - sas_free_task(task); - pm8001_dbg(pm8001_ha, FAIL, - "Domain device cannot be allocated\n"); - return; - } - - task->dev = dev; - task->dev->lldd_dev = pm8001_ha_dev; - - ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_ha_dev, task); - if (!ccb) { - sas_free_task(task); - kfree(dev); - return; - } - - pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; - pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; - - memset(&sata_cmd, 0, sizeof(sata_cmd)); - - /* construct read log FIS */ - memset(&fis, 0, sizeof(struct host_to_dev_fis)); - fis.fis_type = 0x27; - fis.flags = 0x80; - fis.command = ATA_CMD_READ_LOG_EXT; - fis.lbal = 0x10; - fis.sector_count = 0x1; - - sata_cmd.tag = cpu_to_le32(ccb->ccb_tag); - sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); - sata_cmd.ncqtag_atap_dir_m_dad = cpu_to_le32(((0x1 << 7) | (0x5 << 9))); - memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); - - res = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &sata_cmd, - sizeof(sata_cmd), 0); - pm8001_dbg(pm8001_ha, FAIL, "Executing read log end\n"); - if (res) { - sas_free_task(task); - pm8001_ccb_free(pm8001_ha, ccb); - kfree(dev); - } -} - /** * mpi_ssp_completion - process the event that FW response to the SSP request. 
* @pm8001_ha: our hba card information @@ -2396,15 +2289,15 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, if (t->dev && (t->dev->lldd_dev)) pm8001_dev = t->dev->lldd_dev; } else { - pm8001_dbg(pm8001_ha, FAIL, "task null\n"); + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); return; } - if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) - && unlikely(!t || !t->lldd_task || !t->dev)) { - pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); + + if (pm8001_dev && unlikely(!t->lldd_task || !t->dev)) return; - } ts = &t->task_status; @@ -2461,15 +2354,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_SAM_STAT_GOOD; - /* check if response is for SEND READ LOG */ - if (pm8001_dev && - (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { - pm80xx_send_abort_all(pm8001_ha, pm8001_dev); - /* Free the tag */ - pm8001_tag_free(pm8001_ha, tag); - sas_free_task(t); - return; - } } else { u8 len; ts->resp = SAS_TASK_COMPLETE; @@ -2804,21 +2688,27 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { /* find device using device id */ pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); - /* send read log extension */ + /* send read log extension by aborting the link - libata does what we want */ if (pm8001_dev) - pm80xx_send_read_log(pm8001_ha, pm8001_dev); + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_XFER_ERROR_ABORTED_NCQ_MODE); return; } ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; pm8001_dev = ccb->device; - - if (unlikely(!t || !t->lldd_task || !t->dev)) { - pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n"); + if (unlikely(!t)) { + pm8001_dbg(pm8001_ha, FAIL, "task null, freeing CCB tag %d\n", + ccb->ccb_tag); + pm8001_ccb_free(pm8001_ha, ccb); return; } + if (unlikely(!t->lldd_task || !t->dev)) + return; + ts = &t->task_status; pm8001_dbg(pm8001_ha, IOERR, "port_id:0x%x, tag:0x%x, event:0x%x\n", port_id, tag, event); @@ -3550,10 +3440,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) case HW_EVENT_PHY_DOWN: pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n"); hw_event_phy_down(pm8001_ha, piomb); - if (pm8001_ha->reset_in_progress) { - pm8001_dbg(pm8001_ha, MSG, "Reset in progress\n"); - return 0; - } phy->phy_attached = 0; phy->phy_state = PHY_LINK_DISABLE; break; @@ -4357,25 +4243,12 @@ static int check_enc_sat_cmd(struct sas_task *task) static u32 pm80xx_chip_get_q_index(struct sas_task *task) { - struct scsi_cmnd *scmd = NULL; - u32 blk_tag; - - if (task->uldd_task) { - struct ata_queued_cmd *qc; + struct request *rq = sas_task_find_rq(task); - if (dev_is_sata(task->dev)) { - qc = task->uldd_task; - scmd = qc->scsicmd; - } else { - scmd = task->uldd_task; - } - } - - if (!scmd) + if (!rq) return 0; - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); - return blk_mq_unique_tag_to_hwq(blk_tag); + return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(rq)); } /** @@ -4550,7 +4423,6 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 end_addr_high, end_addr_low; u32 ATAP = 0x0; u32 dir; - unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); @@ -4729,40 +4601,6 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, (task->ata_task.atapi_packet[15] << 24))); } - /* Check for read log for failed drive and return */ - if (sata_cmd.sata_fis.command == 0x2f) { - if (pm8001_ha_dev && 
((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || - (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || - (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { - struct task_status_struct *ts; - - pm8001_ha_dev->id &= 0xDFFFFFFF; - ts = &task->task_status; - - spin_lock_irqsave(&task->task_state_lock, flags); - ts->resp = SAS_TASK_COMPLETE; - ts->stat = SAS_SAM_STAT_GOOD; - task->task_state_flags &= ~SAS_TASK_STATE_PENDING; - task->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((task->task_state_flags & - SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&task->task_state_lock, - flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p resp 0x%x stat 0x%x but aborted by upper layer\n", - task, ts->resp, - ts->stat); - pm8001_ccb_task_free(pm8001_ha, ccb); - return 0; - } else { - spin_unlock_irqrestore(&task->task_state_lock, - flags); - pm8001_ccb_task_free_done(pm8001_ha, ccb); - atomic_dec(&pm8001_ha_dev->running_req); - return 0; - } - } - } trace_pm80xx_request_issue(pm8001_ha->id, ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS, ccb->ccb_tag, opc, diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index e045c6e25090..35e16600fc63 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2951,7 +2951,6 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) int i; struct scsi_bd *pbl; u64 *list; - dma_addr_t page; /* Alloc dma memory for BDQ buffers */ for (i = 0; i < QEDF_BDQ_SIZE; i++) { @@ -3012,11 +3011,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size / QEDF_PAGE_SIZE; list = (u64 *)qedf->bdq_pbl_list; - page = qedf->bdq_pbl_list_dma; for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) { *list = qedf->bdq_pbl_dma; list++; - page += QEDF_PAGE_SIZE; } return 0; diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index df2fe7bd26d1..f2ee49756df8 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -618,7 +618,7 @@ static int qedi_cm_alloc_mem(struct qedi_ctx *qedi) sizeof(struct qedi_endpoint *)), GFP_KERNEL); if (!qedi->ep_tbl) return -ENOMEM; - port_id = prandom_u32_max(QEDI_LOCAL_PORT_RANGE); + port_id = get_random_u32_below(QEDI_LOCAL_PORT_RANGE); if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE, QEDI_LOCAL_PORT_MIN, port_id)) { qedi_cm_free_mem(qedi); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 802eec6407d9..a26a373be9da 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -5136,17 +5136,17 @@ struct secure_flash_update_block_pk { (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || \ test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags)) -#define QLA_VHA_MARK_BUSY(__vha, __bail) do { \ - atomic_inc(&__vha->vref_count); \ - mb(); \ - if (__vha->flags.delete_progress) { \ - atomic_dec(&__vha->vref_count); \ - wake_up(&__vha->vref_waitq); \ - __bail = 1; \ - } else { \ - __bail = 0; \ - } \ -} while (0) +static inline bool qla_vha_mark_busy(scsi_qla_host_t *vha) +{ + atomic_inc(&vha->vref_count); + mb(); + if (vha->flags.delete_progress) { + atomic_dec(&vha->vref_count); + wake_up(&vha->vref_waitq); + return true; + } + return false; +} #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ atomic_dec(&__vha->vref_count); \ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e12db95de688..ce4c5d728407 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -168,7 +168,6 @@ int qla24xx_async_abort_cmd(srb_t 
*cmd_sp, bool wait) struct srb_iocb *abt_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; - uint8_t bail; /* ref: INIT for ABTS command */ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, @@ -176,7 +175,7 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) if (!sp) return QLA_MEMORY_ALLOC_FAILED; - QLA_VHA_MARK_BUSY(vha, bail); + qla_vha_mark_busy(vha); abt_iocb = &sp->u.iocb_cmd; sp->type = SRB_ABT_CMD; sp->name = "abort"; @@ -2020,14 +2019,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, struct srb_iocb *tm_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; - uint8_t bail; /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; - QLA_VHA_MARK_BUSY(vha, bail); + qla_vha_mark_busy(vha); sp->type = SRB_TM_CMD; sp->name = "tmf"; qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), @@ -5546,7 +5544,6 @@ static int qla2x00_configure_local_loop(scsi_qla_host_t *vha) { int rval, rval2; - int found_devs; int found; fc_port_t *fcport, *new_fcport; uint16_t index; @@ -5561,7 +5558,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) if (N2N_TOPO(ha)) return qla2x00_configure_n2n_loop(vha); - found_devs = 0; new_fcport = NULL; entries = MAX_FIBRE_DEVICES_LOOP; @@ -5720,8 +5716,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) /* Base iIDMA settings on HBA port speed. */ fcport->fp_speed = ha->link_data_rate; - - found_devs++; } list_for_each_entry(fcport, &vha->vp_fcports, list) { diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index db17f7f410cd..5185dc5daf80 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -225,11 +225,9 @@ static inline srb_t * qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) { srb_t *sp = NULL; - uint8_t bail; struct qla_qpair *qpair; - QLA_VHA_MARK_BUSY(vha, bail); - if (unlikely(bail)) + if (unlikely(qla_vha_mark_busy(vha))) return NULL; qpair = vha->hw->base_qpair; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2c85f3cce726..7fb28c207ee5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3284,7 +3284,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->max_cmd_len, host->max_channel, host->max_lun, host->transportt, sht->vendor_id); - INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); INIT_WORK(&ha->heartbeat_work, qla_heartbeat_work_fn); /* Set up the irqs */ @@ -5069,13 +5068,11 @@ struct qla_work_evt * qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; - uint8_t bail; if (test_bit(UNLOADING, &vha->dpc_flags)) return NULL; - QLA_VHA_MARK_BUSY(vha, bail); - if (bail) + if (qla_vha_mark_busy(vha)) return NULL; e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index bb754a950802..548f22705ddc 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -6741,6 +6741,9 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha) mutex_init(&vha->vha_tgt.tgt_mutex); mutex_init(&vha->vha_tgt.tgt_host_action_mutex); + INIT_LIST_HEAD(&vha->unknown_atio_list); + INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn); + qlt_clear_mode(vha); /* diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 9e849f6b0d0f..005502125b27 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ 
b/drivers/scsi/qla4xxx/ql4_os.c @@ -116,7 +116,7 @@ static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, static int qla4xxx_get_iface_param(struct iscsi_iface *iface, enum iscsi_param_type param_type, int param, char *buf); -static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); +static enum scsi_timeout_action qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking); @@ -1871,17 +1871,17 @@ exit_get_stats: return; } -static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) +static enum scsi_timeout_action qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) { struct iscsi_cls_session *session; unsigned long flags; - enum blk_eh_timer_return ret = BLK_EH_DONE; + enum scsi_timeout_action ret = SCSI_EH_NOT_HANDLED; session = starget_to_session(scsi_target(sc->device)); spin_lock_irqsave(&session->lock, flags); if (session->state == ISCSI_SESSION_FAILED) - ret = BLK_EH_RESET_TIMER; + ret = SCSI_EH_RESET_TIMER; spin_unlock_irqrestore(&session->lock, flags); return ret; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index c59eac7a32f2..1426b9b03612 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -563,14 +563,14 @@ int scsi_device_get(struct scsi_device *sdev) { if (sdev->sdev_state == SDEV_DEL || sdev->sdev_state == SDEV_CANCEL) goto fail; - if (!get_device(&sdev->sdev_gendev)) - goto fail; if (!try_module_get(sdev->host->hostt->module)) - goto fail_put_device; + goto fail; + if (!get_device(&sdev->sdev_gendev)) + goto fail_put_module; return 0; -fail_put_device: - put_device(&sdev->sdev_gendev); +fail_put_module: + module_put(sdev->host->hostt->module); fail: return -ENXIO; } @@ -588,6 +588,8 @@ void scsi_device_put(struct scsi_device *sdev) { struct module *mod = sdev->host->hostt->module; + might_sleep(); + put_device(&sdev->sdev_gendev); module_put(mod); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index bebda917b138..cc6953809a24 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3785,7 +3785,7 @@ static int resp_write_scat(struct scsi_cmnd *scp, mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); return illegal_condition_result; } - lrdp = kzalloc(lbdof_blen, GFP_ATOMIC); + lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN); if (lrdp == NULL) return SCSI_MLQUEUE_HOST_BUSY; if (sdebug_verbose) @@ -4436,7 +4436,7 @@ static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) if (ret) return ret; - arr = kcalloc(lb_size, vnum, GFP_ATOMIC); + arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -4504,7 +4504,7 @@ static int resp_report_zones(struct scsi_cmnd *scp, rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD); - arr = kzalloc(alloc_len, GFP_ATOMIC); + arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN); if (!arr) { mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC, INSUFF_RES_ASCQ); @@ -5702,16 +5702,16 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, u64 ns = jiffies_to_nsecs(delta_jiff); if (sdebug_random && ns < U32_MAX) { - ns = prandom_u32_max((u32)ns); + ns = get_random_u32_below((u32)ns); } else if (sdebug_random) { ns >>= 12; /* scale to 4 usec precision */ if (ns < U32_MAX) /* over 4 hours max */ - ns = prandom_u32_max((u32)ns); + ns = get_random_u32_below((u32)ns); ns <<= 12; } 
kt = ns_to_ktime(ns); } else { /* ndelay has a 4.2 second max */ - kt = sdebug_random ? prandom_u32_max((u32)ndelay) : + kt = sdebug_random ? get_random_u32_below((u32)ndelay) : (u32)ndelay; if (ndelay < INCLUSIVE_TIMING_MAX_NS) { u64 d = ktime_get_boottime_ns() - ns_from_boot; @@ -7340,7 +7340,10 @@ clean: kfree(sdbg_devinfo->zstate); kfree(sdbg_devinfo); } - kfree(sdbg_host); + if (sdbg_host->dev.release) + put_device(&sdbg_host->dev); + else + kfree(sdbg_host); pr_warn("%s: failed, errno=%d\n", __func__, -error); return error; } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 6995c8979230..a7960ad2d386 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -312,7 +312,7 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) * Ensure that all tasks observe the host state change before the * host_failed change. */ - call_rcu(&scmd->rcu, scsi_eh_inc_host_failed); + call_rcu_hurry(&scmd->rcu, scsi_eh_inc_host_failed); } /** @@ -328,7 +328,6 @@ void scsi_eh_scmd_add(struct scsi_cmnd *scmd) enum blk_eh_timer_return scsi_timeout(struct request *req) { struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req); - enum blk_eh_timer_return rtn = BLK_EH_DONE; struct Scsi_Host *host = scmd->device->host; trace_scsi_dispatch_cmd_timeout(scmd); @@ -338,31 +337,30 @@ enum blk_eh_timer_return scsi_timeout(struct request *req) if (host->eh_deadline != -1 && !host->last_reset) host->last_reset = jiffies; - if (host->hostt->eh_timed_out) - rtn = host->hostt->eh_timed_out(scmd); - - if (rtn == BLK_EH_DONE) { - /* - * Set the command to complete first in order to prevent a real - * completion from releasing the command while error handling - * is using it. If the command was already completed, then the - * lower level driver beat the timeout handler, and it is safe - * to return without escalating error recovery. - * - * If timeout handling lost the race to a real completion, the - * block layer may ignore that due to a fake timeout injection, - * so return RESET_TIMER to allow error handling another shot - * at this command. - */ - if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state)) + if (host->hostt->eh_timed_out) { + switch (host->hostt->eh_timed_out(scmd)) { + case SCSI_EH_DONE: + return BLK_EH_DONE; + case SCSI_EH_RESET_TIMER: return BLK_EH_RESET_TIMER; - if (scsi_abort_command(scmd) != SUCCESS) { - set_host_byte(scmd, DID_TIME_OUT); - scsi_eh_scmd_add(scmd); + case SCSI_EH_NOT_HANDLED: + break; } } - return rtn; + /* + * If scsi_done() has already set SCMD_STATE_COMPLETE, do not modify + * *scmd. 
+ */ + if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state)) + return BLK_EH_DONE; + atomic_inc(&scmd->device->iodone_cnt); + if (scsi_abort_command(scmd) != SUCCESS) { + set_host_byte(scmd, DID_TIME_OUT); + scsi_eh_scmd_add(scmd); + } + + return BLK_EH_DONE; } /** diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 2d20da55fb64..fdd47565a311 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -519,7 +519,7 @@ static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode, return -EFAULT; if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) return -EINVAL; - if (get_user(opcode, sic->data)) + if (get_user(opcode, &sic->data[0])) return -EFAULT; bytes = max(in_len, out_len); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 8b89fab7c420..9ed1ebcb7443 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -308,6 +308,18 @@ static void scsi_kick_queue(struct request_queue *q) } /* + * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with + * interrupts disabled. + */ +static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data) +{ + struct scsi_device *current_sdev = data; + + if (sdev != current_sdev) + blk_mq_run_hw_queues(sdev->request_queue, true); +} + +/* * Called for single_lun devices on IO completion. Clear starget_sdev_user, * and call blk_run_queue for all the scsi_devices on the target - * including current_sdev first. @@ -317,7 +329,6 @@ static void scsi_kick_queue(struct request_queue *q) static void scsi_single_lun_run(struct scsi_device *current_sdev) { struct Scsi_Host *shost = current_sdev->host; - struct scsi_device *sdev, *tmp; struct scsi_target *starget = scsi_target(current_sdev); unsigned long flags; @@ -334,22 +345,9 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev) scsi_kick_queue(current_sdev->request_queue); spin_lock_irqsave(shost->host_lock, flags); - if (starget->starget_sdev_user) - goto out; - list_for_each_entry_safe(sdev, tmp, &starget->devices, - same_target_siblings) { - if (sdev == current_sdev) - continue; - if (scsi_device_get(sdev)) - continue; - - spin_unlock_irqrestore(shost->host_lock, flags); - scsi_kick_queue(sdev->request_queue); - spin_lock_irqsave(shost->host_lock, flags); - - scsi_device_put(sdev); - } - out: + if (!starget->starget_sdev_user) + __starget_for_each_device(starget, current_sdev, + scsi_kick_sdev_queue); spin_unlock_irqrestore(shost->host_lock, flags); } @@ -1343,9 +1341,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q, struct scsi_device *sdev, struct scsi_cmnd *cmd) { - if (scsi_host_in_recovery(shost)) - return 0; - if (atomic_read(&shost->host_blocked) > 0) { if (scsi_host_busy(shost) > 0) goto starved; @@ -1469,8 +1464,6 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) struct Scsi_Host *host = cmd->device->host; int rtn = 0; - atomic_inc(&cmd->device->iorequest_cnt); - /* check if the device is still usable */ if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { /* in SDEV_DEL we error all commands. 
DID_NO_CONNECT @@ -1734,6 +1727,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, ret = BLK_STS_RESOURCE; if (!scsi_target_queue_ready(shost, sdev)) goto out_put_budget; + if (unlikely(scsi_host_in_recovery(shost))) { + if (cmd->flags & SCMD_FAIL_IF_RECOVERING) + ret = BLK_STS_OFFLINE; + goto out_dec_target_busy; + } if (!scsi_host_queue_ready(q, shost, sdev, cmd)) goto out_dec_target_busy; @@ -1764,6 +1762,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, goto out_dec_host_busy; } + atomic_inc(&cmd->device->iorequest_cnt); return BLK_STS_OK; out_dec_host_busy: @@ -2735,7 +2734,7 @@ static void scsi_stop_queue(struct scsi_device *sdev, bool nowait) blk_mq_quiesce_queue(sdev->request_queue); } else { if (!nowait) - blk_mq_wait_quiesce_done(sdev->request_queue); + blk_mq_wait_quiesce_done(sdev->request_queue->tag_set); } } diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index c52de9a973e4..96284a0e13fe 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -111,14 +111,14 @@ extern void scsi_evt_thread(struct work_struct *work); /* scsi_proc.c */ #ifdef CONFIG_SCSI_PROC_FS -extern void scsi_proc_hostdir_add(struct scsi_host_template *); -extern void scsi_proc_hostdir_rm(struct scsi_host_template *); +extern int scsi_proc_hostdir_add(const struct scsi_host_template *); +extern void scsi_proc_hostdir_rm(const struct scsi_host_template *); extern void scsi_proc_host_add(struct Scsi_Host *); extern void scsi_proc_host_rm(struct Scsi_Host *); extern int scsi_init_procfs(void); extern void scsi_exit_procfs(void); #else -# define scsi_proc_hostdir_add(sht) do { } while (0) +# define scsi_proc_hostdir_add(sht) 0 # define scsi_proc_hostdir_rm(sht) do { } while (0) # define scsi_proc_host_add(shost) do { } while (0) # define scsi_proc_host_rm(shost) do { } while (0) diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 95aee1ad1383..4a6eb1741be0 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -43,8 +43,23 @@ static struct proc_dir_entry *proc_scsi; -/* Protect sht->present and sht->proc_dir */ +/* Protects scsi_proc_list */ static DEFINE_MUTEX(global_host_template_mutex); +static LIST_HEAD(scsi_proc_list); + +/** + * struct scsi_proc_entry - (host template, SCSI proc dir) association + * @entry: entry in scsi_proc_list. + * @sht: SCSI host template associated with the procfs directory. + * @proc_dir: procfs directory associated with the SCSI host template. + * @present: Number of SCSI hosts instantiated for @sht. 
+ */ +struct scsi_proc_entry { + struct list_head entry; + const struct scsi_host_template *sht; + struct proc_dir_entry *proc_dir; + unsigned int present; +}; static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) @@ -83,6 +98,45 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file) 4 * PAGE_SIZE); } +static struct scsi_proc_entry * +__scsi_lookup_proc_entry(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e; + + lockdep_assert_held(&global_host_template_mutex); + + list_for_each_entry(e, &scsi_proc_list, entry) + if (e->sht == sht) + return e; + + return NULL; +} + +static struct scsi_proc_entry * +scsi_lookup_proc_entry(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e; + + mutex_lock(&global_host_template_mutex); + e = __scsi_lookup_proc_entry(sht); + mutex_unlock(&global_host_template_mutex); + + return e; +} + +/** + * scsi_template_proc_dir() - returns the procfs dir for a SCSI host template + * @sht: SCSI host template pointer. + */ +struct proc_dir_entry * +scsi_template_proc_dir(const struct scsi_host_template *sht) +{ + struct scsi_proc_entry *e = scsi_lookup_proc_entry(sht); + + return e ? e->proc_dir : NULL; +} +EXPORT_SYMBOL_GPL(scsi_template_proc_dir); + static const struct proc_ops proc_scsi_ops = { .proc_open = proc_scsi_host_open, .proc_release = single_release, @@ -97,35 +151,61 @@ static const struct proc_ops proc_scsi_ops = { * * Sets sht->proc_dir to the new directory. */ - -void scsi_proc_hostdir_add(struct scsi_host_template *sht) +int scsi_proc_hostdir_add(const struct scsi_host_template *sht) { + struct scsi_proc_entry *e; + int ret; + if (!sht->show_info) - return; + return 0; mutex_lock(&global_host_template_mutex); - if (!sht->present++) { - sht->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); - if (!sht->proc_dir) - printk(KERN_ERR "%s: proc_mkdir failed for %s\n", - __func__, sht->proc_name); + e = __scsi_lookup_proc_entry(sht); + if (!e) { + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) { + ret = -ENOMEM; + goto unlock; + } } + if (e->present++) + goto success; + e->proc_dir = proc_mkdir(sht->proc_name, proc_scsi); + if (!e->proc_dir) { + printk(KERN_ERR "%s: proc_mkdir failed for %s\n", __func__, + sht->proc_name); + ret = -ENOMEM; + goto unlock; + } + e->sht = sht; + list_add_tail(&e->entry, &scsi_proc_list); +success: + e = NULL; + ret = 0; +unlock: mutex_unlock(&global_host_template_mutex); + + kfree(e); + return ret; } /** * scsi_proc_hostdir_rm - remove directory in /proc for a scsi host * @sht: owner of directory */ -void scsi_proc_hostdir_rm(struct scsi_host_template *sht) +void scsi_proc_hostdir_rm(const struct scsi_host_template *sht) { + struct scsi_proc_entry *e; + if (!sht->show_info) return; mutex_lock(&global_host_template_mutex); - if (!--sht->present && sht->proc_dir) { + e = __scsi_lookup_proc_entry(sht); + if (e && !--e->present) { remove_proc_entry(sht->proc_name, proc_scsi); - sht->proc_dir = NULL; + list_del(&e->entry); + kfree(e); } mutex_unlock(&global_host_template_mutex); } @@ -137,20 +217,29 @@ void scsi_proc_hostdir_rm(struct scsi_host_template *sht) */ void scsi_proc_host_add(struct Scsi_Host *shost) { - struct scsi_host_template *sht = shost->hostt; + const struct scsi_host_template *sht = shost->hostt; + struct scsi_proc_entry *e; struct proc_dir_entry *p; char name[10]; - if (!sht->proc_dir) + if (!sht->show_info) return; + e = scsi_lookup_proc_entry(sht); + if (!e) + goto err; + sprintf(name,"%d", shost->host_no); - 
p = proc_create_data(name, S_IRUGO | S_IWUSR, - sht->proc_dir, &proc_scsi_ops, shost); + p = proc_create_data(name, S_IRUGO | S_IWUSR, e->proc_dir, + &proc_scsi_ops, shost); if (!p) - printk(KERN_ERR "%s: Failed to register host %d in" - "%s\n", __func__, shost->host_no, - sht->proc_name); + goto err; + return; + +err: + shost_printk(KERN_ERR, shost, + "%s: Failed to register host (%s failed)\n", __func__, + e ? "proc_create_data()" : "scsi_proc_hostdir_add()"); } /** @@ -159,13 +248,19 @@ void scsi_proc_host_add(struct Scsi_Host *shost) */ void scsi_proc_host_rm(struct Scsi_Host *shost) { + const struct scsi_host_template *sht = shost->hostt; + struct scsi_proc_entry *e; char name[10]; - if (!shost->hostt->proc_dir) + if (!sht->show_info) + return; + + e = scsi_lookup_proc_entry(sht); + if (!e) return; sprintf(name,"%d", shost->host_no); - remove_proc_entry(name, shost->hostt->proc_dir); + remove_proc_entry(name, e->proc_dir); } /** * proc_print_scsidevice - return data about this host diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 5d27f5196de6..7a6904a3928e 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -344,7 +344,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, sdev->request_queue = q; q->queuedata = sdev; __scsi_init_queue(sdev->host, q); - WARN_ON_ONCE(!blk_get_queue(q)); depth = sdev->host->cmd_per_lun ?: 1; @@ -1580,7 +1579,8 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, scsi_complete_async_scans(); if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { - scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata); + scsi_probe_and_add_lun(starget, lun, NULL, &sdev, + SCSI_SCAN_RESCAN, hostdata); scsi_autopm_put_host(shost); } mutex_unlock(&shost->scan_mutex); @@ -1919,7 +1919,7 @@ static void do_scsi_scan_host(struct Scsi_Host *shost) msleep(10); } else { scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, - SCAN_WILD_CARD, 0); + SCAN_WILD_CARD, SCSI_SCAN_INITIAL); } } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index cac7c902cf70..981d1bab2120 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -441,20 +441,15 @@ static void scsi_device_cls_release(struct device *class_dev) put_device(&sdev->sdev_gendev); } -static void scsi_device_dev_release_usercontext(struct work_struct *work) +static void scsi_device_dev_release(struct device *dev) { - struct scsi_device *sdev; + struct scsi_device *sdev = to_scsi_device(dev); struct device *parent; struct list_head *this, *tmp; struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL; struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL; unsigned long flags; - struct module *mod; - - sdev = container_of(work, struct scsi_device, ew.work); - - mod = sdev->host->hostt->module; scsi_dh_release_device(sdev); @@ -518,19 +513,6 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) if (parent) put_device(parent); - module_put(mod); -} - -static void scsi_device_dev_release(struct device *dev) -{ - struct scsi_device *sdp = to_scsi_device(dev); - - /* Set module pointer as NULL in case of module unloading */ - if (!try_module_get(sdp->host->hostt->module)) - sdp->host->hostt->module = NULL; - - execute_in_process_context(scsi_device_dev_release_usercontext, - &sdp->ew); } static struct class sdev_class = { diff --git a/drivers/scsi/scsi_transport_fc.c 
b/drivers/scsi/scsi_transport_fc.c index 8934160c4a33..0965f8a7134f 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2530,15 +2530,14 @@ static int fc_vport_match(struct attribute_container *cont, * Notes: * This routine assumes no locks are held on entry. */ -enum blk_eh_timer_return -fc_eh_timed_out(struct scsi_cmnd *scmd) +enum scsi_timeout_action fc_eh_timed_out(struct scsi_cmnd *scmd) { struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); if (rport->port_state == FC_PORTSTATE_BLOCKED) - return BLK_EH_RESET_TIMER; + return SCSI_EH_RESET_TIMER; - return BLK_EH_DONE; + return SCSI_EH_NOT_HANDLED; } EXPORT_SYMBOL(fc_eh_timed_out); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f473c002fa4d..13cfd3e317cc 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2989,7 +2989,7 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev } static int -iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) +iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) { char *data = (char*)ev + sizeof(*ev); struct iscsi_cls_conn *conn; @@ -3942,7 +3942,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) err = -EINVAL; break; case ISCSI_UEVENT_SET_PARAM: - err = iscsi_set_param(transport, ev); + err = iscsi_if_set_param(transport, ev); break; case ISCSI_UEVENT_CREATE_CONN: case ISCSI_UEVENT_DESTROY_CONN: diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 98a34ed10f1a..87d0fb8dc503 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -594,13 +594,13 @@ EXPORT_SYMBOL(srp_reconnect_rport); * @scmd: SCSI command. * * If a timeout occurs while an rport is in the blocked state, ask the SCSI - * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core - * handle the timeout (BLK_EH_DONE). + * EH to continue waiting (SCSI_EH_RESET_TIMER). Otherwise let the SCSI core + * handle the timeout (SCSI_EH_NOT_HANDLED). * * Note: This function is called from soft-IRQ context and with the request * queue lock held. */ -enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) +enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd) { struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; @@ -611,7 +611,7 @@ enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) return rport && rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 && i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? 
- BLK_EH_RESET_TIMER : BLK_EH_DONE; + SCSI_EH_RESET_TIMER : SCSI_EH_NOT_HANDLED; } EXPORT_SYMBOL(srp_timed_out); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index eb76ba055021..faa2b55d1a21 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1026,8 +1026,13 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) /* flush requests don't perform I/O, zero the S/G table */ memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - cmd->cmnd[0] = SYNCHRONIZE_CACHE; - cmd->cmd_len = 10; + if (cmd->device->use_16_for_sync) { + cmd->cmnd[0] = SYNCHRONIZE_CACHE_16; + cmd->cmd_len = 16; + } else { + cmd->cmnd[0] = SYNCHRONIZE_CACHE; + cmd->cmd_len = 10; + } cmd->transfersize = 0; cmd->allowed = sdkp->max_retries; @@ -1587,9 +1592,12 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) sshdr = &my_sshdr; for (retries = 3; retries > 0; --retries) { - unsigned char cmd[10] = { 0 }; + unsigned char cmd[16] = { 0 }; - cmd[0] = SYNCHRONIZE_CACHE; + if (sdp->use_16_for_sync) + cmd[0] = SYNCHRONIZE_CACHE_16; + else + cmd[0] = SYNCHRONIZE_CACHE; /* * Leave the rest of the command zero to indicate * flush everything. diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index bd15624c6322..b163bf936acc 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -921,9 +921,10 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) return 0; } - /* READ16/WRITE16 is mandatory for ZBC disks */ + /* READ16/WRITE16/SYNC16 is mandatory for ZBC devices */ sdkp->device->use_16_for_rw = 1; sdkp->device->use_10_for_rw = 0; + sdkp->device->use_16_for_sync = 1; if (!blk_queue_is_zoned(q)) { /* diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index ce34a8ad53b4..12344be14232 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1726,7 +1726,7 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; - int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ; + int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? 
ITER_SOURCE : ITER_DEST; struct scsi_cmnd *scmd; SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index e550b12e525a..af27bb0f3133 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -1130,7 +1130,7 @@ struct pqi_scsi_dev { u8 phy_id; u8 ncq_prio_enable; u8 ncq_prio_support; - u8 multi_lun_device_lun_count; + u8 lun_count; bool raid_bypass_configured; /* RAID bypass configured */ bool raid_bypass_enabled; /* RAID bypass enabled */ u32 next_bypass_group[RAID_MAP_MAX_DATA_DISKS_PER_ROW]; @@ -1307,7 +1307,6 @@ struct pqi_ctrl_info { dma_addr_t error_buffer_dma_handle; size_t sg_chain_buffer_length; unsigned int num_queue_groups; - u16 max_hw_queue_index; u16 num_elements_per_iq; u16 num_elements_per_oq; u16 max_inbound_iu_length_per_firmware; @@ -1369,8 +1368,6 @@ struct pqi_ctrl_info { u64 sas_address; struct pqi_io_request *io_request_pool; - u16 next_io_request_slot; - struct pqi_event events[PQI_NUM_SUPPORTED_EVENTS]; struct work_struct event_work; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index b971fbe3b3a1..d0446d4d4465 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -33,11 +33,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.18-045" +#define DRIVER_VERSION "2.1.20-035" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 18 -#define DRIVER_REVISION 45 +#define DRIVER_RELEASE 20 +#define DRIVER_REVISION 35 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -678,23 +678,36 @@ static inline void pqi_reinit_io_request(struct pqi_io_request *io_request) io_request->raid_bypass = false; } -static struct pqi_io_request *pqi_alloc_io_request( - struct pqi_ctrl_info *ctrl_info) +static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) { struct pqi_io_request *io_request; - u16 i = ctrl_info->next_io_request_slot; /* benignly racy */ + u16 i; - while (1) { + if (scmd) { /* SML I/O request */ + u32 blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); + + i = blk_mq_unique_tag_to_tag(blk_tag); io_request = &ctrl_info->io_request_pool[i]; - if (atomic_inc_return(&io_request->refcount) == 1) - break; - atomic_dec(&io_request->refcount); - i = (i + 1) % ctrl_info->max_io_slots; + if (atomic_inc_return(&io_request->refcount) > 1) { + atomic_dec(&io_request->refcount); + return NULL; + } + } else { /* IOCTL or driver internal request */ + /* + * benignly racy - may have to wait for an open slot. 
+ * command slot range is scsi_ml_can_queue - + * [scsi_ml_can_queue + (PQI_RESERVED_IO_SLOTS - 1)] + */ + i = 0; + while (1) { + io_request = &ctrl_info->io_request_pool[ctrl_info->scsi_ml_can_queue + i]; + if (atomic_inc_return(&io_request->refcount) == 1) + break; + atomic_dec(&io_request->refcount); + i = (i + 1) % PQI_RESERVED_IO_SLOTS; + } } - /* benignly racy */ - ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots; - pqi_reinit_io_request(io_request); return io_request; @@ -1610,9 +1623,7 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, &id_phys->alternate_paths_phys_connector, sizeof(device->phys_connector)); device->bay = id_phys->phys_bay_in_box; - device->multi_lun_device_lun_count = id_phys->multi_lun_device_lun_count; - if (!device->multi_lun_device_lun_count) - device->multi_lun_device_lun_count = 1; + device->lun_count = id_phys->multi_lun_device_lun_count; if ((id_phys->even_more_flags & PQI_DEVICE_PHY_MAP_SUPPORTED) && id_phys->phy_count) device->phy_id = @@ -1746,7 +1757,7 @@ out: return offline; } -static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, +static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct bmic_identify_physical_device *id_phys) { @@ -1763,6 +1774,20 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, return rc; } +static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, + struct pqi_scsi_dev *device, + struct bmic_identify_physical_device *id_phys) +{ + int rc; + + rc = pqi_get_device_info_phys_logical(ctrl_info, device, id_phys); + + if (rc == 0 && device->lun_count == 0) + device->lun_count = 1; + + return rc; +} + static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device) { @@ -1897,7 +1922,7 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi int rc; int lun; - for (lun = 0; lun < device->multi_lun_device_lun_count; lun++) { + for (lun = 0; lun < device->lun_count; lun++) { rc = pqi_device_wait_for_pending_io(ctrl_info, device, lun, PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS); if (rc) @@ -2076,6 +2101,7 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, existing_device->sas_address = new_device->sas_address; existing_device->queue_depth = new_device->queue_depth; existing_device->device_offline = false; + existing_device->lun_count = new_device->lun_count; if (pqi_is_logical_device(existing_device)) { existing_device->is_external_raid_device = new_device->is_external_raid_device; @@ -2108,10 +2134,6 @@ static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info, existing_device->phy_connected_dev_type = new_device->phy_connected_dev_type; memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); memcpy(existing_device->phys_connector, new_device->phys_connector, sizeof(existing_device->phys_connector)); - - existing_device->multi_lun_device_lun_count = new_device->multi_lun_device_lun_count; - if (existing_device->multi_lun_device_lun_count == 0) - existing_device->multi_lun_device_lun_count = 1; } } @@ -4586,7 +4608,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, goto out; } - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, NULL); put_unaligned_le16(io_request->index, &(((struct pqi_raid_path_request *)request)->request_id)); @@ -5233,7 +5255,6 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info) } 
ctrl_info->num_queue_groups = num_queue_groups; - ctrl_info->max_hw_queue_index = num_queue_groups - 1; /* * Make sure that the max. inbound IU length is an even multiple @@ -5567,7 +5588,9 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, { struct pqi_io_request *io_request; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, device, scmd, queue_group); @@ -5671,7 +5694,9 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device; device = scmd->device->hostdata; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; io_request->raid_bypass = raid_bypass; @@ -5743,7 +5768,10 @@ static int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request; struct pqi_aio_r1_path_request *r1_request; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; + io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; io_request->raid_bypass = true; @@ -5801,7 +5829,9 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request; struct pqi_aio_r56_path_request *r56_request; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; io_request->raid_bypass = true; @@ -5860,13 +5890,10 @@ static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info, static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd) { - u16 hw_queue; - - hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); - if (hw_queue > ctrl_info->max_hw_queue_index) - hw_queue = 0; - - return hw_queue; + /* + * We are setting host_tagset = 1 during init. 
+ */ + return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); } static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) @@ -6268,7 +6295,7 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd struct pqi_scsi_dev *device; device = scmd->device->hostdata; - io_request = pqi_alloc_io_request(ctrl_info); + io_request = pqi_alloc_io_request(ctrl_info, NULL); io_request->io_complete_callback = pqi_lun_reset_complete; io_request->context = &wait; @@ -6484,6 +6511,12 @@ static void pqi_slave_destroy(struct scsi_device *sdev) return; } + device->lun_count--; + if (device->lun_count > 0) { + mutex_unlock(&ctrl_info->scan_mutex); + return; + } + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); list_del(&device->scsi_device_list_entry); spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); @@ -7237,7 +7270,7 @@ static ssize_t pqi_raid_level_show(struct device *dev, return -ENODEV; } - if (pqi_is_logical_device(device)) + if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) raid_level = pqi_raid_level_to_string(device->raid_level); else raid_level = "N/A"; @@ -7405,7 +7438,6 @@ static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info) shost->max_channel = PQI_MAX_BUS; shost->max_cmd_len = MAX_COMMAND_SIZE; shost->max_lun = PQI_MAX_LUNS_PER_DEVICE; - shost->max_lun = ~0; shost->max_id = ~0; shost->max_sectors = ctrl_info->max_sectors; shost->can_queue = ctrl_info->scsi_ml_can_queue; @@ -7972,7 +8004,7 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) struct pqi_config_table *config_table; struct pqi_config_table_section_header *section; struct pqi_config_table_section_info section_info; - struct pqi_config_table_section_info feature_section_info; + struct pqi_config_table_section_info feature_section_info = {0}; table_length = ctrl_info->config_table_length; if (table_length == 0) @@ -9008,6 +9040,7 @@ static void pqi_pci_remove(struct pci_dev *pci_dev) { struct pqi_ctrl_info *ctrl_info; u16 vendor_id; + int rc; ctrl_info = pci_get_drvdata(pci_dev); if (!ctrl_info) @@ -9019,6 +9052,13 @@ static void pqi_pci_remove(struct pci_dev *pci_dev) else ctrl_info->ctrl_removal_state = PQI_CTRL_GRACEFUL_REMOVAL; + if (ctrl_info->ctrl_removal_state == PQI_CTRL_GRACEFUL_REMOVAL) { + rc = pqi_flush_cache(ctrl_info, RESTART); + if (rc) + dev_err(&pci_dev->dev, + "unable to flush controller cache during remove\n"); + } + pqi_remove_ctrl(ctrl_info); } @@ -9304,6 +9344,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x110b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8460) }, { @@ -9404,6 +9448,22 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0086) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0087) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0088) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x0089) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd227) }, { @@ -9652,6 +9712,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x1475) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1480) }, { @@ -9708,6 +9772,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { 
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14c3)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x14c4)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_VENDOR_ID_ADAPTEC2, 0x14d0)
 	},
 	{
@@ -9944,6 +10016,18 @@ static const struct pci_device_id pqi_pci_id_table[] = {
 	},
 	{
 		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1e93, 0x1000)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1e93, 0x1001)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       0x1e93, 0x1002)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
 			       PCI_ANY_ID, PCI_ANY_ID)
 	},
 	{ 0 }
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index 9b2b5f8c23b9..8fbf3c1b1311 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -304,6 +304,9 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
 			      ret);
 
 		put_device(&snic->shost->shost_gendev);
+		spin_lock_irqsave(snic->shost->host_lock, flags);
+		list_del(&tgt->list);
+		spin_unlock_irqrestore(snic->shost->host_lock, flags);
 		kfree(tgt);
 		tgt = NULL;
 
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3c5b7e4227b2..d7a84c0bfaeb 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1651,13 +1651,13 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
 * be unbounded on Azure. Reset the timer unconditionally to give the host a
 * chance to perform EH.
 */
-static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
+static enum scsi_timeout_action storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
 {
 #if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
 	if (scmnd->device->host->transportt == fc_transport_template)
 		return fc_eh_timed_out(scmnd);
 #endif
-	return BLK_EH_RESET_TIMER;
+	return SCSI_EH_RESET_TIMER;
 }
 
 static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 2a79ab16134b..d07d24c06b54 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -731,9 +731,9 @@ static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)
 * latencies might be higher than on bare metal. Reset the timer
 * unconditionally to give the host a chance to perform EH.
 */
-static enum blk_eh_timer_return virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
+static enum scsi_timeout_action virtscsi_eh_timed_out(struct scsi_cmnd *scmnd)
 {
-	return BLK_EH_RESET_TIMER;
+	return SCSI_EH_RESET_TIMER;
 }
 
 static struct scsi_host_template virtscsi_host_template = {
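
The scsi_transport_fc, scsi_transport_srp, storvsc and virtio_scsi hunks above all make the same mechanical conversion: the timeout hook now returns the SCSI-level enum scsi_timeout_action instead of the block-layer enum blk_eh_timer_return, with BLK_EH_RESET_TIMER becoming SCSI_EH_RESET_TIMER and BLK_EH_DONE becoming SCSI_EH_NOT_HANDLED. The sketch below is not taken from these patches; it only illustrates the converted shape of an .eh_timed_out handler for a hypothetical driver, with foo_port and its blocked flag invented for the example.

/* Illustrative sketch only; the foo_* names are hypothetical. */
struct foo_port {
	bool blocked;		/* transport temporarily unreachable */
};

static enum scsi_timeout_action foo_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct foo_port *port = shost_priv(scmd->device->host);

	/* Keep the command timer running while the port is blocked. */
	if (port->blocked)
		return SCSI_EH_RESET_TIMER;	/* was BLK_EH_RESET_TIMER */

	/* Otherwise hand the timeout to normal SCSI error handling. */
	return SCSI_EH_NOT_HANDLED;		/* was BLK_EH_DONE */
}

static struct scsi_host_template foo_template = {
	/* ... */
	.eh_timed_out	= foo_eh_timed_out,
};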
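
In the same spirit, the smartpqi rework of pqi_alloc_io_request() shown earlier leans on the block layer's unique tags: because the host template sets host_tagset = 1 (see the comment added in pqi_get_hw_queue()), every in-flight command carries a tag that is unique across all hardware queues, so the driver can index a preallocated per-command pool directly instead of maintaining its own next-free-slot cursor. A minimal sketch of that pattern, again with invented foo_* names rather than the driver's real structures:

/* Illustrative sketch only; assumes shost->host_tagset = 1 was set at init. */
struct foo_io_ctx {
	atomic_t refcount;
	/* ... per-command driver state ... */
};

struct foo_ctrl {
	struct foo_io_ctx *io_ctx_pool;	/* one slot per block-layer tag */
};

static struct foo_io_ctx *foo_get_io_ctx(struct foo_ctrl *ctrl,
					 struct scsi_cmnd *scmd)
{
	u32 unique = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
	u16 tag = blk_mq_unique_tag_to_tag(unique);

	/*
	 * The tag is unique across hardware queues only because host_tagset
	 * is set; the same value also encodes the hw queue number, which
	 * blk_mq_unique_tag_to_hwq() extracts, as pqi_get_hw_queue() does.
	 */
	return &ctrl->io_ctx_pool[tag];
}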