Diffstat (limited to 'drivers/scsi')
156 files changed, 11298 insertions, 1783 deletions
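Most of the per-driver churn in the hunks below follows one mechanical pattern: direct scmd->request dereferences are replaced by the scsi_cmd_to_rq() accessor, so drivers stop reaching into the request pointer embedded in struct scsi_cmnd. A minimal usage sketch of that pattern, assuming only the scsi_cmd_to_rq() helper from include/scsi/scsi_cmnd.h (the function name below is illustrative, not part of this series):

#include <scsi/scsi_cmnd.h>
#include <linux/blkdev.h>

/* Hypothetical helper: read the blk-mq tag and timeout of a SCSI command
 * through scsi_cmd_to_rq() instead of dereferencing scmd->request directly,
 * mirroring the aacraid/ips/fnic conversions in the diff below.
 */
static unsigned int example_cmd_timeout_secs(struct scsi_cmnd *scmd)
{
	struct request *rq = scsi_cmd_to_rq(scmd);

	pr_debug("cmd tag %d\n", rq->tag);	/* blk-mq tag for this command */
	return rq->timeout / HZ;		/* request timeout in seconds */
}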
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 1c6b4e672687..a12e3525977d 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1823,7 +1823,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) && SCp->device->simple_tags) { - slot->tag = SCp->request->tag; + slot->tag = scsi_cmd_to_rq(SCp)->tag; CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n", slot->tag, slot); } else { diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index adddcd589941..40088dcb98cd 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -1711,7 +1711,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter) if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) { blogic_info(" DMA Channel: None, ", adapter); if (adapter->bios_addr > 0) - blogic_info("BIOS Address: 0x%lX, ", adapter, + blogic_info("BIOS Address: 0x%X, ", adapter, adapter->bios_addr); else blogic_info("BIOS Address: None, ", adapter); @@ -3436,7 +3436,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, int len = 0; va_start(args, adapter); - len = vsprintf(buf, fmt, args); + len = vscnprintf(buf, sizeof(buf), fmt, args); va_end(args); if (msglevel == BLOGIC_ANNOUNCE_LEVEL) { static int msglines = 0; @@ -3451,7 +3451,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, if (buf[0] != '\n' || len > 1) printk("%sscsi%d: %s", blogic_msglevelmap[msglevel], adapter->host_no, buf); } else - printk("%s", buf); + pr_cont("%s", buf); } else { if (begin) { if (adapter != NULL && adapter->adapter_initd) @@ -3459,7 +3459,7 @@ static void blogic_msg(enum blogic_msglevel msglevel, char *fmt, else printk("%s%s", blogic_msglevelmap[msglevel], buf); } else - printk("%s", buf); + pr_cont("%s", buf); } begin = (buf[len - 1] == '\n'); } diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8f44d433e06e..6e3a04107bb6 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -14,12 +14,16 @@ config RAID_ATTRS help Provides RAID +config SCSI_COMMON + tristate + config SCSI tristate "SCSI device support" depends on BLOCK select SCSI_DMA if HAS_DMA select SG_POOL - select BLK_SCSI_REQUEST + select SCSI_COMMON + select BLK_DEV_BSG_COMMON if BLK_DEV_BSG help If you want to use a SCSI hard disk, SCSI tape drive, SCSI CD-ROM or any other SCSI device under Linux, say Y and make sure that you know @@ -140,6 +144,18 @@ config CHR_DEV_SG If unsure, say N. +config BLK_DEV_BSG + bool "/dev/bsg support (SG v4)" + depends on SCSI + default y + help + Saying Y here will enable generic SG (SCSI generic) v4 support + for any SCSI device. + + This option is required by UDEV to access device serial numbers, etc. + + If unsure, say Y. 
+ config CHR_DEV_SCH tristate "SCSI media changer support" depends on SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 1748d1ec1338..f086eca2bcd7 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -20,7 +20,7 @@ CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF obj-$(CONFIG_PCMCIA) += pcmcia/ obj-$(CONFIG_SCSI) += scsi_mod.o -obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o +obj-$(CONFIG_SCSI_COMMON) += scsi_common.o obj-$(CONFIG_RAID_ATTRS) += raid_class.o @@ -168,6 +168,7 @@ scsi_mod-$(CONFIG_BLK_DEBUG_FS) += scsi_debugfs.o scsi_mod-y += scsi_trace.o scsi_logging.o scsi_mod-$(CONFIG_PM) += scsi_pm.o scsi_mod-$(CONFIG_SCSI_DH) += scsi_dh.o +scsi_mod-$(CONFIG_BLK_DEV_BSG) += scsi_bsg.o hv_storvsc-y := storvsc_drv.o diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 3baadd068768..a85589a2a8af 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -778,7 +778,7 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) } #ifdef CONFIG_SUN3 - if ((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) { + if (sun3scsi_dma_finish(hostdata->connected->sc_data_direction)) { pr_err("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", instance->host_no); BUG(); @@ -1710,7 +1710,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) count = sun3scsi_dma_xfer_len(hostdata, cmd); if (count > 0) { - if (rq_data_dir(cmd->request)) + if (cmd->sc_data_direction == DMA_TO_DEVICE) sun3scsi_dma_send_setup(hostdata, cmd->SCp.ptr, count); else @@ -2158,7 +2158,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) count = sun3scsi_dma_xfer_len(hostdata, tmp); if (count > 0) { - if (rq_data_dir(tmp->request)) + if (tmp->sc_data_direction == DMA_TO_DEVICE) sun3scsi_dma_send_setup(hostdata, tmp->SCp.ptr, count); else diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 46b8dffce2dd..c2d6f0a9e0b1 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -25,7 +25,6 @@ #include <linux/completion.h> #include <linux/blkdev.h> #include <linux/uaccess.h> -#include <linux/highmem.h> /* For flush_kernel_dcache_page */ #include <linux/module.h> #include <asm/unaligned.h> @@ -1505,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd srbcmd->id = cpu_to_le32(scmd_id(cmd)); srbcmd->lun = cpu_to_le32(cmd->device->lun); srbcmd->flags = cpu_to_le32(flag); - timeout = cmd->request->timeout/HZ; + timeout = scsi_cmd_to_rq(cmd)->timeout / HZ; if (timeout == 0) timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT); srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 54eb4d41bc2c..deb32c9f4b3e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -224,7 +224,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) { struct fib *fibptr; - fibptr = &dev->fibs[scmd->request->tag]; + fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag]; /* * Null out fields that depend on being zero at the start of * each I/O diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index f3377e2ef5fb..ffb391967573 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -7423,7 +7423,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, * Set the srb_tag to the command tag + 1, as * srb_tag '0' is used internally by the chip. 
*/ - srb_tag = scp->request->tag + 1; + srb_tag = scsi_cmd_to_rq(scp)->tag + 1; asc_scsi_q->q2.srb_tag = srb_tag; /* @@ -7637,7 +7637,7 @@ static int adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, adv_req_t **adv_reqpp) { - u32 srb_tag = scp->request->tag; + u32 srb_tag = scsi_cmd_to_rq(scp)->tag; adv_req_t *reqp; ADV_SCSI_REQ_Q *scsiqp; int ret; diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 1210e61afb18..584a59522038 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -262,11 +262,12 @@ static void aha1542_free_cmd(struct scsi_cmnd *cmd) struct aha1542_cmd *acmd = scsi_cmd_priv(cmd); if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + struct request *rq = scsi_cmd_to_rq(cmd); void *buf = acmd->data_buffer; struct req_iterator iter; struct bio_vec bv; - rq_for_each_segment(bv, cmd->request, iter) { + rq_for_each_segment(bv, rq, iter) { memcpy_to_page(bv.bv_page, bv.bv_offset, buf, bv.bv_len); buf += bv.bv_len; @@ -447,11 +448,12 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) #endif if (cmd->sc_data_direction == DMA_TO_DEVICE) { + struct request *rq = scsi_cmd_to_rq(cmd); void *buf = acmd->data_buffer; struct req_iterator iter; struct bio_vec bv; - rq_for_each_segment(bv, cmd->request, iter) { + rq_for_each_segment(bv, rq, iter) { memcpy_from_page(buf, bv.bv_page, bv.bv_offset, bv.bv_len); buf += bv.bv_len; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 462717bbb5b7..4e899ec1477d 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -235,8 +235,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba, wrb = alloc_mcc_wrb(phba, &tag); if (!wrb) { mutex_unlock(&ctrl->mbox_lock); - rc = -ENOMEM; - goto free_cmd; + return -ENOMEM; } sge = nonembedded_sgl(wrb); @@ -269,24 +268,6 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba, /* copy the response, if any */ if (resp_buf) memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); - /** - * This is special case of NTWK_GET_IF_INFO where the size of - * response is not known. beiscsi_if_get_info checks the return - * value to free DMA buffer. - */ - if (rc == -EAGAIN) - return rc; - - /** - * If FW is busy that is driver timed out, DMA buffer is saved with - * the tag, only when the cmd completes this buffer is freed. - */ - if (rc == -EBUSY) - return rc; - -free_cmd: - dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size, - nonemb_cmd->va, nonemb_cmd->dma); return rc; } @@ -309,6 +290,19 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba, return 0; } +static void beiscsi_free_nemb_cmd(struct beiscsi_hba *phba, + struct be_dma_mem *cmd, int rc) +{ + /* + * If FW is busy the DMA buffer is saved with the tag. When the cmd + * completes this buffer is freed. + */ + if (rc == -EBUSY) + return; + + dma_free_coherent(&phba->ctrl.pdev->dev, cmd->size, cmd->va, cmd->dma); +} + static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag) { struct be_dma_mem *tag_mem; @@ -344,8 +338,16 @@ int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, cpu_to_le32(set_eqd[i].delay_multiplier); } - return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, - __beiscsi_eq_delay_compl, NULL, 0); + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, __beiscsi_eq_delay_compl, + NULL, 0); + if (rc) { + /* + * Only free on failure. Async cmds are handled like -EBUSY + * where it's handled for us. 
+ */ + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + } + return rc; } /** @@ -372,6 +374,7 @@ int beiscsi_get_initiator_name(struct beiscsi_hba *phba, char *name, bool cfg) req->hdr.version = 1; rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, &resp, sizeof(resp)); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); if (rc) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -449,7 +452,9 @@ static int beiscsi_if_mod_gw(struct beiscsi_hba *phba, req->ip_addr.ip_type = ip_type; memcpy(req->ip_addr.addr, gw, (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN); - return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); + rt_val = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rt_val); + return rt_val; } int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw) @@ -499,8 +504,10 @@ int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type, req = nonemb_cmd.va; req->ip_type = ip_type; - return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, - resp, sizeof(*resp)); + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, resp, + sizeof(*resp)); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + return rc; } static int @@ -537,6 +544,7 @@ beiscsi_if_clr_ip(struct beiscsi_hba *phba, "BG_%d : failed to clear IP: rc %d status %d\n", rc, req->ip_params.ip_record.status); } + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); return rc; } @@ -581,6 +589,7 @@ beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip, if (req->ip_params.ip_record.status) rc = -EINVAL; } + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); return rc; } @@ -608,6 +617,7 @@ int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type, reldhcp->interface_hndl = phba->interface_handle; reldhcp->ip_type = ip_type; rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); if (rc < 0) { beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, "BG_%d : failed to release existing DHCP: %d\n", @@ -689,7 +699,7 @@ int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type) dhcpreq->interface_hndl = phba->interface_handle; dhcpreq->ip_type = ip_type; rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, NULL, 0); - + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); exit: kfree(if_info); return rc; @@ -762,11 +772,8 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type, BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, "BG_%d : Memory Allocation Failure\n"); - /* Free the DMA memory for the IOCTL issuing */ - dma_free_coherent(&phba->ctrl.pdev->dev, - nonemb_cmd.size, - nonemb_cmd.va, - nonemb_cmd.dma); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, + -ENOMEM); return -ENOMEM; } @@ -781,15 +788,13 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type, nonemb_cmd.va)->actual_resp_len; ioctl_size += sizeof(struct be_cmd_req_hdr); - /* Free the previous allocated DMA memory */ - dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, - nonemb_cmd.va, - nonemb_cmd.dma); - + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); /* Free the virtual memory */ kfree(*if_info); - } else + } else { + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); break; + } } while (true); return rc; } @@ -806,8 +811,9 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba, if (rc) return rc; - return beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, - nic, sizeof(*nic)); + rc = beiscsi_exec_nemb_cmd(phba, &nonemb_cmd, NULL, nic, sizeof(*nic)); + beiscsi_free_nemb_cmd(phba, &nonemb_cmd, rc); + return rc; } static void 
beiscsi_boot_process_compl(struct beiscsi_hba *phba, diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 43e8a1dafec0..5521469ce678 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1918,7 +1918,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session, spin_unlock(&session->back_lock); - p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request)); + p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(scsi_cmd_to_rq(sc))); spin_lock(&p->p_work_lock); if (unlikely(!p->iothread)) { rc = -EINVAL; diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index fc7197abfcdf..27012908b586 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -618,6 +618,12 @@ ch_checkrange(scsi_changer *ch, unsigned int type, unsigned int unit) return 0; } +struct changer_element_status32 { + int ces_type; + compat_uptr_t ces_data; +}; +#define CHIOGSTATUS32 _IOW('c', 8, struct changer_element_status32) + static long ch_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -748,7 +754,20 @@ static long ch_ioctl(struct file *file, return ch_gstatus(ch, ces.ces_type, ces.ces_data); } +#ifdef CONFIG_COMPAT + case CHIOGSTATUS32: + { + struct changer_element_status32 ces32; + if (copy_from_user(&ces32, argp, sizeof(ces32))) + return -EFAULT; + if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES) + return -EINVAL; + + return ch_gstatus(ch, ces32.ces_type, + compat_ptr(ces32.ces_data)); + } +#endif case CHIOGELEM: { struct changer_get_element cge; @@ -858,59 +877,11 @@ static long ch_ioctl(struct file *file, } default: - return scsi_ioctl(ch->device, cmd, argp); + return scsi_ioctl(ch->device, NULL, file->f_mode, cmd, argp); } } -#ifdef CONFIG_COMPAT - -struct changer_element_status32 { - int ces_type; - compat_uptr_t ces_data; -}; -#define CHIOGSTATUS32 _IOW('c', 8,struct changer_element_status32) - -static long ch_ioctl_compat(struct file * file, - unsigned int cmd, unsigned long arg) -{ - scsi_changer *ch = file->private_data; - int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd, - file->f_flags & O_NDELAY); - if (retval) - return retval; - - switch (cmd) { - case CHIOGPARAMS: - case CHIOGVPARAMS: - case CHIOPOSITION: - case CHIOMOVE: - case CHIOEXCHANGE: - case CHIOGELEM: - case CHIOINITELEM: - case CHIOSVOLTAG: - /* compatible */ - return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); - case CHIOGSTATUS32: - { - struct changer_element_status32 ces32; - unsigned char __user *data; - - if (copy_from_user(&ces32, (void __user *)arg, sizeof (ces32))) - return -EFAULT; - if (ces32.ces_type < 0 || ces32.ces_type >= CH_TYPES) - return -EINVAL; - - data = compat_ptr(ces32.ces_data); - return ch_gstatus(ch, ces32.ces_type, data); - } - default: - return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg)); - - } -} -#endif - /* ------------------------------------------------------------------------ */ static int ch_probe(struct device *dev) @@ -1015,9 +986,7 @@ static const struct file_operations changer_fops = { .open = ch_open, .release = ch_release, .unlocked_ioctl = ch_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = ch_ioctl_compat, -#endif + .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 56b9ad0a1ca0..3b2eb6ce1fcf 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1786,7 +1786,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd) struct csio_scsi_qset *sqset; struct 
fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); - sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)]; + sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(scsi_cmd_to_rq(cmnd))]; nr = fc_remote_port_chkready(rport); if (nr) { @@ -1989,13 +1989,13 @@ inval_scmnd: csio_info(hw, "Aborted SCSI command to (%d:%llu) tag %u\n", cmnd->device->id, cmnd->device->lun, - cmnd->request->tag); + scsi_cmd_to_rq(cmnd)->tag); return SUCCESS; } else { csio_info(hw, "Failed to abort SCSI command, (%d:%llu) tag %u\n", cmnd->device->id, cmnd->device->lun, - cmnd->request->tag); + scsi_cmd_to_rq(cmnd)->tag); return FAILED; } } diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 222593bc2afe..2f1894588e0b 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -433,7 +433,7 @@ static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp, hwq = afu->hwq_rr_count++ % afu->num_hwqs; break; case HWQ_MODE_TAG: - tag = blk_mq_unique_tag(scp->request); + tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp)); hwq = blk_mq_unique_tag_to_hwq(tag); break; case HWQ_MODE_CPU: diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index a18a4a08f049..7af96d14c9bc 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -652,7 +652,7 @@ static int adpt_abort(struct scsi_cmnd * cmd) msg[2] = 0; msg[3]= 0; /* Add 1 to avoid firmware treating it as invalid command */ - msg[4] = cmd->request->tag + 1; + msg[4] = scsi_cmd_to_rq(cmd)->tag + 1; if (pHba->host) spin_lock_irq(pHba->host->host_lock); rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER); @@ -2236,7 +2236,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); msg[2] = 0; /* Add 1 to avoid firmware treating it as invalid command */ - msg[3] = cmd->request->tag + 1; + msg[3] = scsi_cmd_to_rq(cmd)->tag + 1; // Our cards use the transaction context as the tag for queueing // Adaptec/DPT Private stuff msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c index e0d798d6baee..bb3b460dc0bc 100644 --- a/drivers/scsi/elx/efct/efct_lio.c +++ b/drivers/scsi/elx/efct/efct_lio.c @@ -780,7 +780,7 @@ efct_lio_npiv_make_nport(struct target_fabric_configfs *tf, { struct efct_lio_vport *lio_vport; struct efct *efct; - int ret = -1; + int ret; u64 p_wwpn, npiv_wwpn, npiv_wwnn; char *p, *pbuf, tmp[128]; struct efct_lio_vport_list_t *vport_list; diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 762cc8bd2653..0f9cedf78872 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -107,7 +107,7 @@ static void fnic_cleanup_io(struct fnic *fnic); static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, struct scsi_cmnd *sc) { - u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1); + u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1); return &fnic->io_req_lock[hash]; } @@ -390,7 +390,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, (rp->flags & FC_RP_FLAGS_RETRY)) exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; - fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag, + fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag, 0, exch_flags, io_req->sgl_cnt, SCSI_SENSE_BUFFERSIZE, io_req->sgl_list_pa, @@ -422,6 +422,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, */ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { 
+ const int tag = scsi_cmd_to_rq(sc)->tag; struct fc_lport *lp = shost_priv(sc->device->host); struct fc_rport *rport; struct fnic_io_req *io_req = NULL; @@ -511,8 +512,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ sg_count = scsi_dma_map(sc); if (sg_count < 0) { FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, - sc->request->tag, sc, 0, sc->cmnd[0], - sg_count, CMD_STATE(sc)); + tag, sc, 0, sc->cmnd[0], sg_count, CMD_STATE(sc)); mempool_free(io_req, fnic->io_req_pool); goto out; } @@ -571,7 +571,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ * refetch the pointer under the lock. */ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, - sc->request->tag, sc, 0, 0, 0, + tag, sc, 0, 0, 0, (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); io_req = (struct fnic_io_req *)CMD_SP(sc); CMD_SP(sc) = NULL; @@ -603,8 +603,7 @@ out: sc->cmnd[5]); FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no, - sc->request->tag, sc, io_req, - sg_count, cmd_trace, + tag, sc, io_req, sg_count, cmd_trace, (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc))); /* if only we issued IO, will we have the io lock */ @@ -1364,6 +1363,7 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data, bool reserved) { + const int tag = scsi_cmd_to_rq(sc)->tag; struct fnic *fnic = data; struct fnic_io_req *io_req; unsigned long flags = 0; @@ -1371,7 +1371,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data, unsigned long start_time = 0; struct fnic_stats *fnic_stats = &fnic->fnic_stats; - io_lock = fnic_io_lock_tag(fnic, sc->request->tag); + io_lock = fnic_io_lock_tag(fnic, tag); spin_lock_irqsave(io_lock, flags); io_req = (struct fnic_io_req *)CMD_SP(sc); @@ -1413,7 +1413,7 @@ cleanup_scsi_cmd: sc->result = DID_TRANSPORT_DISRUPTED << 16; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", - sc->request->tag, sc, (jiffies - start_time)); + tag, sc, jiffies - start_time); if (atomic64_read(&fnic->io_cmpl_skip)) atomic64_dec(&fnic->io_cmpl_skip); @@ -1425,10 +1425,10 @@ cleanup_scsi_cmd: if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) shost_printk(KERN_ERR, fnic->lport->host, "Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n", - sc->request->tag, sc); + tag, sc); FNIC_TRACE(fnic_cleanup_io, - sc->device->host->host_no, sc->request->tag, sc, + sc->device->host->host_no, tag, sc, jiffies_to_msecs(jiffies - start_time), 0, ((u64)sc->cmnd[0] << 32 | (u64)sc->cmnd[2] << 24 | @@ -1566,7 +1566,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data, { struct fnic_rport_abort_io_iter_data *iter_data = data; struct fnic *fnic = iter_data->fnic; - int abt_tag = sc->request->tag; + int abt_tag = scsi_cmd_to_rq(sc)->tag; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; @@ -1727,6 +1727,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport) */ int fnic_abort_cmd(struct scsi_cmnd *sc) { + struct request *const rq = scsi_cmd_to_rq(sc); struct fc_lport *lp; struct fnic *fnic; struct fnic_io_req *io_req = NULL; @@ -1741,7 +1742,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) struct abort_stats *abts_stats; struct terminate_stats *term_stats; enum fnic_ioreq_state old_ioreq_state; - int tag; + const int tag = rq->tag; unsigned long abt_issued_time; DECLARE_COMPLETION_ONSTACK(tm_done); @@ -1757,7 +1758,6 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) term_stats 
= &fnic->fnic_stats.term_stats; rport = starget_to_rport(scsi_target(sc->device)); - tag = sc->request->tag; FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n", @@ -1842,8 +1842,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) /* Now queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); - if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, - fc_lun.scsi_lun, io_req)) { + if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun, + io_req)) { spin_lock_irqsave(io_lock, flags); if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) CMD_STATE(sc) = old_ioreq_state; @@ -1943,8 +1943,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) } fnic_abort_cmd_end: - FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, - sc->request->tag, sc, + FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc, jiffies_to_msecs(jiffies - start_time), 0, ((u64)sc->cmnd[0] << 32 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | @@ -1994,7 +1993,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, /* fill in the lun info */ int_to_scsilun(sc->device->lun, &fc_lun); - fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST, + fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST, 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG, fc_lun.scsi_lun, io_req->port_id, fnic->config.ra_tov, fnic->config.ed_tov); @@ -2025,7 +2024,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, struct fnic_pending_aborts_iter_data *iter_data = data; struct fnic *fnic = iter_data->fnic; struct scsi_device *lun_dev = iter_data->lun_dev; - int abt_tag = sc->request->tag; + int abt_tag = scsi_cmd_to_rq(sc)->tag; struct fnic_io_req *io_req; spinlock_t *io_lock; unsigned long flags; @@ -2206,14 +2205,15 @@ clean_pending_aborts_end: static inline int fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc) { - struct request_queue *q = sc->request->q; + struct request *rq = scsi_cmd_to_rq(sc); + struct request_queue *q = rq->q; struct request *dummy; dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT); if (IS_ERR(dummy)) return SCSI_NO_TAG; - sc->tag = sc->request->tag = dummy->tag; + sc->tag = rq->tag = dummy->tag; sc->host_scribble = (unsigned char *)dummy; return dummy->tag; @@ -2238,6 +2238,7 @@ fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc) */ int fnic_device_reset(struct scsi_cmnd *sc) { + struct request *rq = scsi_cmd_to_rq(sc); struct fc_lport *lp; struct fnic *fnic; struct fnic_io_req *io_req = NULL; @@ -2250,7 +2251,7 @@ int fnic_device_reset(struct scsi_cmnd *sc) struct scsi_lun fc_lun; struct fnic_stats *fnic_stats; struct reset_stats *reset_stats; - int tag = 0; + int tag = rq->tag; DECLARE_COMPLETION_ONSTACK(tm_done); int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/ bool new_sc = 0; @@ -2284,7 +2285,6 @@ int fnic_device_reset(struct scsi_cmnd *sc) CMD_FLAGS(sc) = FNIC_DEVICE_RESET; /* Allocate tag if not present */ - tag = sc->request->tag; if (unlikely(tag < 0)) { /* * Really should fix the midlayer to pass in a proper @@ -2458,8 +2458,7 @@ fnic_device_reset_clean: } fnic_device_reset_end: - FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, - sc->request->tag, sc, + FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc, jiffies_to_msecs(jiffies - start_time), 0, ((u64)sc->cmnd[0] << 32 | (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 | diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c 
index 3a903e8e0384..9515c45affa5 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -185,7 +185,7 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, void *bitmap = hisi_hba->slot_index_tags; if (scsi_cmnd) - return scsi_cmnd->request->tag; + return scsi_cmd_to_rq(scsi_cmnd)->tag; spin_lock(&hisi_hba->lock); index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, @@ -449,7 +449,7 @@ static int hisi_sas_task_prep(struct sas_task *task, unsigned int dq_index; u32 blk_tag; - blk_tag = blk_mq_unique_tag(scmd->request); + blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); dq_index = blk_mq_unique_tag_to_hwq(blk_tag); *dq_pointer = dq = &hisi_hba->dq[dq_index]; } else { diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index a4885d03afe2..3ab669dc806f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -1153,7 +1153,7 @@ static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd, { unsigned char prot_op = scsi_get_prot_op(scsi_cmnd); unsigned int interval = scsi_prot_interval(scsi_cmnd); - u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request); + u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmd_to_rq(scsi_cmnd)); switch (prot_op) { case SCSI_PROT_READ_INSERT: diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index f135a10f582b..3faa87fa296a 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5686,7 +5686,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) /* Get the ptr to our adapter structure out of cmd->host. */ h = sdev_to_hba(cmd->device); - BUG_ON(cmd->request->tag < 0); + BUG_ON(scsi_cmd_to_rq(cmd)->tag < 0); dev = cmd->device->hostdata; if (!dev) { @@ -5729,7 +5729,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd) * and is therefore a brand-new command. */ if (likely(cmd->retries == 0 && - !blk_rq_is_passthrough(cmd->request) && + !blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) && h->acciopath_status)) { /* Submit with the retry_pending flag unset. 
*/ rc = hpsa_ioaccel_submit(h, c, cmd, false); @@ -5894,7 +5894,7 @@ static int hpsa_scsi_add_host(struct ctlr_info *h) */ static int hpsa_get_cmd_index(struct scsi_cmnd *scmd) { - int idx = scmd->request->tag; + int idx = scsi_cmd_to_rq(scmd)->tag; if (idx < 0) return idx; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 935b01ee44b7..7fa5e64e38c3 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1926,7 +1926,7 @@ static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) struct ibmvfc_cmd *vfc_cmd; struct ibmvfc_fcp_cmd_iu *iu; struct ibmvfc_event *evt; - u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request); + u32 tag_and_hwq = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq); u16 scsi_channel; int rc; diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index e6a3eaaa57d9..50df7dd9cb91 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -1072,7 +1072,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, init_event_struct(evt_struct, handle_cmd_rsp, VIOSRP_SRP_FORMAT, - cmnd->request->timeout/HZ); + scsi_cmd_to_rq(cmnd)->timeout / HZ); evt_struct->cmnd = cmnd; evt_struct->cmnd_done = done; diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 8b33c9871484..cdd94fb2aab7 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -3735,7 +3735,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb) scb->cmd.dcdb.segment_4G = 0; scb->cmd.dcdb.enhanced_sg = 0; - TimeOut = scb->scsi_cmd->request->timeout; + TimeOut = scsi_cmd_to_rq(scb->scsi_cmd)->timeout; if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */ if (!scb->sg_len) { diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index e1ff79464131..fcaa84a3c210 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -341,7 +341,7 @@ static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op) tc->reserved_E8_0 = 0; if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) - tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff; + tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd); else if (type & SCSI_PROT_DIF_TYPE3) tc->ref_tag_seed_gen = 0; } @@ -369,7 +369,7 @@ static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op) tc->app_tag_gen = 0; if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) - tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff; + tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd); else if (type & SCSI_PROT_DIF_TYPE3) tc->ref_tag_seed_verify = 0; diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig index 052ee3a26f6e..c640535d1ac0 100644 --- a/drivers/scsi/libsas/Kconfig +++ b/drivers/scsi/libsas/Kconfig @@ -10,7 +10,6 @@ config SCSI_SAS_LIBSAS tristate "SAS Domain Transport Attributes" depends on SCSI select SCSI_SAS_ATTRS - select BLK_DEV_BSGLIB help This provides transport specific helpers for SAS drivers which use the domain device construct (like the aic94xxx). 
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile index e63a54f5ab8c..9dc32736cf21 100644 --- a/drivers/scsi/libsas/Makefile +++ b/drivers/scsi/libsas/Makefile @@ -18,4 +18,4 @@ libsas-y += sas_init.o \ libsas-$(CONFIG_SCSI_SAS_ATA) += sas_ata.o libsas-$(CONFIG_SCSI_SAS_HOST_SMP) += sas_host_smp.o -ccflags-y := -DDEBUG +ccflags-y := -DDEBUG -I$(srctree)/drivers/scsi diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 4aa1fda95f35..a315715b3622 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -20,8 +20,8 @@ #include <scsi/scsi.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> -#include "../scsi_sas_internal.h" -#include "../scsi_transport_api.h" +#include "scsi_sas_internal.h" +#include "scsi_transport_api.h" #include <scsi/scsi_eh.h> static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts) @@ -596,7 +596,7 @@ void sas_ata_task_abort(struct sas_task *task) /* Bounce SCSI-initiated commands to the SCSI EH */ if (qc->scsicmd) { - blk_abort_request(qc->scsicmd->request); + blk_abort_request(scsi_cmd_to_rq(qc->scsicmd)); return; } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 9f5068f3bcfb..12e1e36d7c04 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -16,7 +16,7 @@ #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include <scsi/sas_ata.h> -#include "../scsi_sas_internal.h" +#include "scsi_sas_internal.h" /* ---------- Basic task processing for discovery purposes ---------- */ @@ -461,7 +461,7 @@ static void sas_discover_domain(struct work_struct *work) break; #else pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); - /* Fall through */ + fallthrough; #endif /* Fall through - only for the #else condition above. 
*/ default: diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index e00688540219..c2150a818423 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -18,7 +18,7 @@ #include <scsi/sas_ata.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> -#include "../scsi_sas_internal.h" +#include "scsi_sas_internal.h" static int sas_discover_expander(struct domain_device *dev); static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr); diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c index eca2a6bf3601..32cdc969b736 100644 --- a/drivers/scsi/libsas/sas_host_smp.c +++ b/drivers/scsi/libsas/sas_host_smp.c @@ -14,7 +14,7 @@ #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> -#include "../scsi_sas_internal.h" +#include "scsi_sas_internal.h" static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data, u8 phy_id) diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 2b0f98ca6ec3..80592f53017a 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -19,7 +19,7 @@ #include "sas_internal.h" -#include "../scsi_sas_internal.h" +#include "scsi_sas_internal.h" static struct kmem_cache *sas_task_cache; static struct kmem_cache *sas_event_cache; diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index 4ca4b1f30bd0..a0d592d11dfb 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c @@ -10,7 +10,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> -#include "../scsi_sas_internal.h" +#include "scsi_sas_internal.h" /* ---------- Phy events ---------- */ diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index e3d03d744713..67b429dcf1ff 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -10,7 +10,7 @@ #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> -#include "../scsi_sas_internal.h" +#include "scsi_sas_internal.h" static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy) { diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index ee44a0d7730b..08ffb8788290 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -22,9 +22,9 @@ #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include <scsi/sas_ata.h> -#include "../scsi_sas_internal.h" -#include "../scsi_transport_api.h" -#include "../scsi_priv.h" +#include "scsi_sas_internal.h" +#include "scsi_transport_api.h" +#include "scsi_priv.h" #include <linux/err.h> #include <linux/blkdev.h> @@ -908,7 +908,7 @@ void sas_task_abort(struct sas_task *task) if (dev_is_sata(task->dev)) sas_ata_task_abort(task); else - blk_abort_request(sc->request); + blk_abort_request(scsi_cmd_to_rq(sc)); } int sas_slave_alloc(struct scsi_device *sdev) diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 17028861234b..dd3ddfa5f761 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -922,7 +922,6 @@ struct lpfc_hba { uint8_t wwpn[8]; uint32_t RandomData[7]; uint8_t fcp_embed_io; - uint8_t nvme_support; /* Firmware supports NVME */ uint8_t nvmet_support; /* driver supports NVMET */ #define LPFC_NVMET_MAX_PORTS 32 uint8_t mds_diags_support; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 
eb88aaaf36eb..869c2b6f1515 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -4038,6 +4038,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, const char *val_buf = buf; int err; uint32_t prev_val; + u8 sli_family, if_type; if (!strncmp(buf, "nolip ", strlen("nolip "))) { nolip = 1; @@ -4061,13 +4062,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, /* * The 'topology' is not a configurable parameter if : * - persistent topology enabled - * - G7/G6 with no private loop support + * - ASIC_GEN_NUM >= 0xC, with no private loop support */ - + sli_family = bf_get(lpfc_sli_intf_sli_family, + &phba->sli4_hba.sli_intf); + if_type = bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf); if ((phba->hba_flag & HBA_PERSISTENT_TOPO || - (!phba->sli4_hba.pc_sli4_params.pls && - (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || - phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) && + (!phba->sli4_hba.pc_sli4_params.pls && + (sli_family == LPFC_SLI_INTF_FAMILY_G6 || + if_type == LPFC_SLI_INTF_IF_TYPE_6))) && val == 4) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3114 Loop mode not supported\n"); @@ -5412,9 +5416,9 @@ LPFC_VPORT_ATTR_R(fcp_class, 3, 2, 3, /* # lpfc_use_adisc: Use ADISC for FCP rediscovery instead of PLOGI. Value range -# is [0,1]. Default value is 0. +# is [0,1]. Default value is 1. */ -LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, +LPFC_VPORT_ATTR_RW(use_adisc, 1, 0, 1, "Use ADISC on rediscovery to authenticate FCP devices"); /* @@ -6741,6 +6745,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost) case LPFC_LINK_SPEED_128GHZ: fc_host_speed(shost) = FC_PORTSPEED_128GBIT; break; + case LPFC_LINK_SPEED_256GHZ: + fc_host_speed(shost) = FC_PORTSPEED_256GBIT; + break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 737483c3f01d..41e0d8ef015a 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -87,6 +87,8 @@ void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_nlp_set_state(struct lpfc_vport *, struct lpfc_nodelist *, int); +void lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); +void lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); void lpfc_drop_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_set_disctmo(struct lpfc_vport *); int lpfc_can_disctmo(struct lpfc_vport *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 610b6dabb3b5..a1c85fa135a9 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -2846,6 +2846,8 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, ae->un.AttrInt = 0; if (!(phba->hba_flag & HBA_FCOE_MODE)) { + if (phba->lmt & LMT_256Gb) + ae->un.AttrInt |= HBA_PORTSPEED_256GFC; if (phba->lmt & LMT_128Gb) ae->un.AttrInt |= HBA_PORTSPEED_128GFC; if (phba->lmt & LMT_64Gb) @@ -2927,6 +2929,9 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, case LPFC_LINK_SPEED_128GHZ: ae->un.AttrInt = HBA_PORTSPEED_128GFC; break; + case LPFC_LINK_SPEED_256GHZ: + ae->un.AttrInt = HBA_PORTSPEED_256GFC; + break; default: ae->un.AttrInt = HBA_PORTSPEED_UNKNOWN; break; @@ -3884,9 +3889,8 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /** * lpfc_vmid_cmd - Build and send a FDMI cmd 
to the specified NPort * @vport: pointer to a host virtual N_Port data structure. - * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID) - * cmdcode: FDMI command to send - * mask: Mask of HBA or PORT Attributes to send + * @cmdcode: application server command code to send + * @vmid: pointer to vmid info structure * * Builds and sends a FDMI command using the CT subsystem. */ diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 131374a61d7e..871b665bd72e 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -78,10 +78,11 @@ struct lpfc_node_rrqs { }; enum lpfc_fc4_xpt_flags { - NLP_WAIT_FOR_UNREG = 0x1, - SCSI_XPT_REGD = 0x2, - NVME_XPT_REGD = 0x4, - NLP_XPT_HAS_HH = 0x8, + NLP_XPT_REGD = 0x1, + SCSI_XPT_REGD = 0x2, + NVME_XPT_REGD = 0x4, + NVME_XPT_UNREG_WAIT = 0x8, + NLP_XPT_HAS_HH = 0x10 }; struct lpfc_nodelist { diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index e481f5fe29d7..08ae2b12b92c 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1664,6 +1664,12 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, if (!new_ndlp || (new_ndlp == ndlp)) return ndlp; + /* + * Unregister from backend if not done yet. Could have been skipped + * due to ADISC + */ + lpfc_nlp_unreg_node(vport, new_ndlp); + if (phba->sli_rev == LPFC_SLI_REV4) { active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, GFP_KERNEL); @@ -2025,9 +2031,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if (lpfc_error_lost_link(irsp)) - goto check_plogi; - else + if (!lpfc_error_lost_link(irsp)) lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI); @@ -2080,7 +2084,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, NLP_EVT_CMPL_PLOGI); } - check_plogi: if (disc && vport->num_disc_nodes) { /* Check to see if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); @@ -2607,6 +2610,14 @@ lpfc_adisc_done(struct lpfc_vport *vport) if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { + + /* + * If link is down, clear_la and reg_vpi will be done after + * flogi following a link up event + */ + if (!lpfc_is_link_up(phba)) + return; + /* The ADISCs are complete. Doesn't matter if they * succeeded or failed because the ADISC completion * routine guarantees to call the state machine and @@ -2749,12 +2760,9 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "2755 ADISC failure DID:%06X Status:x%x/x%x\n", ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4]); - /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ - if (lpfc_error_lost_link(irsp)) - goto check_adisc; - else - lpfc_disc_state_machine(vport, ndlp, cmdiocb, - NLP_EVT_CMPL_ADISC); + + lpfc_disc_state_machine(vport, ndlp, cmdiocb, + NLP_EVT_CMPL_ADISC); /* As long as this node is not registered with the SCSI or NVMe * transport, it is no longer an active node. 
Otherwise @@ -2772,7 +2780,6 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_ADISC); - check_adisc: /* Check to see if there are more ADISCs to be sent */ if (disc && vport->num_disc_nodes) lpfc_more_adisc(vport); @@ -3375,6 +3382,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) if (phba->sli_rev == LPFC_SLI_REV4) { rc = lpfc_reg_fab_ctrl_node(vport, ndlp); if (rc) { + lpfc_els_free_iocb(phba, elsiocb); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0937 %s: Failed to reg fc node, rc %d\n", __func__, rc); @@ -3413,7 +3421,6 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) return 1; } - /* Keep the ndlp just in case RDF is being sent */ return 0; } @@ -3657,28 +3664,15 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) lpfc_enqueue_node(vport, ndlp); } - /* RDF ELS is not required on an NPIV VN_Port. */ - if (vport->port_type == LPFC_NPIV_PORT) { - lpfc_nlp_put(ndlp); + /* RDF ELS is not required on an NPIV VN_Port. */ + if (vport->port_type == LPFC_NPIV_PORT) return -EACCES; - } elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_RDF); if (!elsiocb) return -ENOMEM; - if (phba->sli_rev == LPFC_SLI_REV4 && - !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, - "0939 %s: FC_NODE x%x RPI x%x flag x%x " - "ste x%x type x%x Not registered\n", - __func__, ndlp->nlp_DID, ndlp->nlp_rpi, - ndlp->nlp_flag, ndlp->nlp_state, - ndlp->nlp_type); - return -ENODEV; - } - /* Configure the payload for the supported FPIN events. */ prdf = (struct lpfc_els_rdf_req *) (((struct lpfc_dmabuf *)elsiocb->context2)->virt); @@ -4378,7 +4372,7 @@ out_retry: (cmd == ELS_CMD_NVMEPRLI)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); - else + else if (cmd != ELS_CMD_ADISC) lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); ndlp->nlp_last_elscmd = cmd; @@ -4612,6 +4606,15 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { + + /* If PLOGI is being retried, PLOGI completion will cleanup the + * node. The NLP_NPR_2B_DISC flag needs to be retained to make + * progress on nodes discovered from last RSCN. + */ + if ((ndlp->nlp_flag & NLP_DELAY_TMO) && + (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) + goto out; + /* NPort Recovery mode or node is just allocated */ if (!lpfc_nlp_not_used(ndlp)) { /* A LOGO is completing and the node is in NPR state. 
@@ -5657,25 +5660,40 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS ADISCs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { - if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); - ndlp->nlp_prev_state = ndlp->nlp_state; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(vport, ndlp, 0); - sentadisc++; - vport->num_disc_nodes++; - if (vport->num_disc_nodes >= - vport->cfg_discovery_threads) { - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_NLP_MORE; - spin_unlock_irq(shost->host_lock); - break; - } + + if (ndlp->nlp_state != NLP_STE_NPR_NODE || + !(ndlp->nlp_flag & NLP_NPR_ADISC)) + continue; + + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_ADISC; + spin_unlock_irq(&ndlp->lock); + + if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + /* This node was marked for ADISC but was not picked + * for discovery. This is possible if the node was + * missing in gidft response. + * + * At time of marking node for ADISC, we skipped unreg + * from backend + */ + lpfc_nlp_unreg_node(vport, ndlp); + continue; } + + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); + lpfc_issue_els_adisc(vport, ndlp, 0); + sentadisc++; + vport->num_disc_nodes++; + if (vport->num_disc_nodes >= + vport->cfg_discovery_threads) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_NLP_MORE; + spin_unlock_irq(shost->host_lock); + break; + } + } if (sentadisc == 0) { spin_lock_irq(shost->host_lock); @@ -6087,6 +6105,12 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) case LPFC_LINK_SPEED_64GHZ: rdp_speed = RDP_PS_64GB; break; + case LPFC_LINK_SPEED_128GHZ: + rdp_speed = RDP_PS_128GB; + break; + case LPFC_LINK_SPEED_256GHZ: + rdp_speed = RDP_PS_256GB; + break; default: rdp_speed = RDP_PS_UNKNOWN; break; @@ -6094,6 +6118,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) desc->info.port_speed.speed = cpu_to_be16(rdp_speed); + if (phba->lmt & LMT_256Gb) + rdp_cap |= RDP_PS_256GB; if (phba->lmt & LMT_128Gb) rdp_cap |= RDP_PS_128GB; if (phba->lmt & LMT_64Gb) @@ -6886,13 +6912,6 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) continue; } - /* Check to see if we need to NVME rescan this target - * remoteport. 
- */ - if (ndlp->nlp_fc4_type & NLP_FC4_NVME && - ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) - lpfc_nvme_rescan_port(vport, ndlp); - lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); lpfc_cancel_retry_delay_tmo(vport, ndlp); @@ -8948,6 +8967,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; } lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO); + if (newnode) + lpfc_disc_state_machine(vport, ndlp, NULL, + NLP_EVT_DEVICE_RM); break; case ELS_CMD_PRLO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 7cc5920979f8..6da2daf7d9e3 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -3331,6 +3331,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) case LPFC_LINK_SPEED_32GHZ: case LPFC_LINK_SPEED_64GHZ: case LPFC_LINK_SPEED_128GHZ: + case LPFC_LINK_SPEED_256GHZ: break; default: phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; @@ -4501,10 +4502,152 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) spin_unlock_irqrestore(shost->host_lock, iflags); } +/* Register a node with backend if not already done */ +void +lpfc_nlp_reg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + + unsigned long iflags; + + spin_lock_irqsave(&ndlp->lock, iflags); + if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { + /* Already registered with backend, trigger rescan */ + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (ndlp->fc4_xpt_flags & NVME_XPT_REGD && + ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) { + lpfc_nvme_rescan_port(vport, ndlp); + } + return; + } + + ndlp->fc4_xpt_flags |= NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (lpfc_valid_xpt_node(ndlp)) { + vport->phba->nport_event_cnt++; + /* + * Tell the fc transport about the port, if we haven't + * already. If we have, and it's a scsi entity, be + */ + lpfc_register_remote_port(vport, ndlp); + } + + /* We are done if we do not have any NVME remote node */ + if (!(ndlp->nlp_fc4_type & NLP_FC4_NVME)) + return; + + /* Notify the NVME transport of this new rport. */ + if (vport->phba->sli_rev >= LPFC_SLI_REV4 && + ndlp->nlp_fc4_type & NLP_FC4_NVME) { + if (vport->phba->nvmet_support == 0) { + /* Register this rport with the transport. + * Only NVME Target Rports are registered with + * the transport. + */ + if (ndlp->nlp_type & NLP_NVME_TARGET) { + vport->phba->nport_event_cnt++; + lpfc_nvme_register_port(vport, ndlp); + } + } else { + /* Just take an NDLP ref count since the + * target does not register rports. + */ + lpfc_nlp_get(ndlp); + } + } +} + +/* Unregister a node with backend if not already done */ +void +lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + unsigned long iflags; + + spin_lock_irqsave(&ndlp->lock, iflags); + if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) { + spin_unlock_irqrestore(&ndlp->lock, iflags); + return; + } + + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + spin_unlock_irqrestore(&ndlp->lock, iflags); + + if (ndlp->rport && + ndlp->fc4_xpt_flags & SCSI_XPT_REGD) { + vport->phba->nport_event_cnt++; + lpfc_unregister_remote_port(ndlp); + } + + if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) { + vport->phba->nport_event_cnt++; + if (vport->phba->nvmet_support == 0) { + /* Start devloss if target. */ + if (ndlp->nlp_type & NLP_NVME_TARGET) + lpfc_nvme_unregister_port(vport, ndlp); + } else { + /* NVMET has no upcall. 
*/ + lpfc_nlp_put(ndlp); + } + } + +} + +/* + * Adisc state change handling + */ +static void +lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, + int new_state) +{ + switch (new_state) { + /* + * Any state to ADISC_ISSUE + * Do nothing, adisc cmpl handling will trigger state changes + */ + case NLP_STE_ADISC_ISSUE: + break; + + /* + * ADISC_ISSUE to mapped states + * Trigger a registration with backend, it will be nop if + * already registered + */ + case NLP_STE_UNMAPPED_NODE: + ndlp->nlp_type |= NLP_FC_NODE; + fallthrough; + case NLP_STE_MAPPED_NODE: + ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + lpfc_nlp_reg_node(vport, ndlp); + break; + + /* + * ADISC_ISSUE to non-mapped states + * We are moving from ADISC_ISSUE to a non-mapped state because + * ADISC failed, we would have skipped unregistering with + * backend, attempt it now + */ + case NLP_STE_NPR_NODE: + ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + fallthrough; + default: + lpfc_nlp_unreg_node(vport, ndlp); + break; + } + +} + static void lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int old_state, int new_state) { + /* Trap ADISC changes here */ + if (new_state == NLP_STE_ADISC_ISSUE || + old_state == NLP_STE_ADISC_ISSUE) { + lpfc_handle_adisc_state(vport, ndlp, new_state); + return; + } + if (new_state == NLP_STE_UNMAPPED_NODE) { ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; ndlp->nlp_type |= NLP_FC_NODE; @@ -4514,60 +4657,17 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (new_state == NLP_STE_NPR_NODE) ndlp->nlp_flag &= ~NLP_RCV_PLOGI; - /* FCP and NVME Transport interface */ + /* Reg/Unreg for FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || old_state == NLP_STE_UNMAPPED_NODE)) { - if (ndlp->rport && - lpfc_valid_xpt_node(ndlp)) { - vport->phba->nport_event_cnt++; - lpfc_unregister_remote_port(ndlp); - } - - if (ndlp->nlp_fc4_type & NLP_FC4_NVME) { - vport->phba->nport_event_cnt++; - if (vport->phba->nvmet_support == 0) { - /* Start devloss if target. */ - if (ndlp->nlp_type & NLP_NVME_TARGET) - lpfc_nvme_unregister_port(vport, ndlp); - } else { - /* NVMET has no upcall. */ - lpfc_nlp_put(ndlp); - } - } + /* For nodes marked for ADISC, Handle unreg in ADISC cmpl */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) + lpfc_nlp_unreg_node(vport, ndlp); } - /* FCP and NVME Transport interfaces */ - if (new_state == NLP_STE_MAPPED_NODE || - new_state == NLP_STE_UNMAPPED_NODE) { - if (lpfc_valid_xpt_node(ndlp)) { - vport->phba->nport_event_cnt++; - /* - * Tell the fc transport about the port, if we haven't - * already. If we have, and it's a scsi entity, be - */ - lpfc_register_remote_port(vport, ndlp); - } - /* Notify the NVME transport of this new rport. */ - if (vport->phba->sli_rev >= LPFC_SLI_REV4 && - ndlp->nlp_fc4_type & NLP_FC4_NVME) { - if (vport->phba->nvmet_support == 0) { - /* Register this rport with the transport. - * Only NVME Target Rports are registered with - * the transport. - */ - if (ndlp->nlp_type & NLP_NVME_TARGET) { - vport->phba->nport_event_cnt++; - lpfc_nvme_register_port(vport, ndlp); - } - } else { - /* Just take an NDLP ref count since the - * target does not register rports. 
- */ - lpfc_nlp_get(ndlp); - } - } - } + new_state == NLP_STE_UNMAPPED_NODE) + lpfc_nlp_reg_node(vport, ndlp); if ((new_state == NLP_STE_MAPPED_NODE) && (vport->stat_data_enabled)) { diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 4a5a85ed42ec..4083764916a5 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -1694,6 +1694,7 @@ struct lpfc_fdmi_reg_portattr { #define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268 #define PCI_DEVICE_ID_LANCER_G6_FC 0xe300 #define PCI_DEVICE_ID_LANCER_G7_FC 0xf400 +#define PCI_DEVICE_ID_LANCER_G7P_FC 0xf500 #define PCI_DEVICE_ID_SAT_SMB 0xf011 #define PCI_DEVICE_ID_SAT_MID 0xf015 #define PCI_DEVICE_ID_RFLY 0xf095 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index eb8c735a243b..aadbb0de629d 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -94,6 +94,9 @@ struct lpfc_sli_intf { #define LPFC_SLI_INTF_FAMILY_BE3 0x1 #define LPFC_SLI_INTF_FAMILY_LNCR_A0 0xa #define LPFC_SLI_INTF_FAMILY_LNCR_B0 0xb +#define LPFC_SLI_INTF_FAMILY_G6 0xc +#define LPFC_SLI_INTF_FAMILY_G7 0xd +#define LPFC_SLI_INTF_FAMILY_G7P 0xe #define lpfc_sli_intf_slirev_SHIFT 4 #define lpfc_sli_intf_slirev_MASK 0x0000000F #define lpfc_sli_intf_slirev_WORD word0 @@ -959,6 +962,12 @@ union lpfc_sli4_cfg_shdr { #define lpfc_mbox_hdr_add_status_SHIFT 8 #define lpfc_mbox_hdr_add_status_MASK 0x000000FF #define lpfc_mbox_hdr_add_status_WORD word7 +#define LPFC_ADD_STATUS_INCOMPAT_OBJ 0xA2 +#define lpfc_mbox_hdr_add_status_2_SHIFT 16 +#define lpfc_mbox_hdr_add_status_2_MASK 0x000000FF +#define lpfc_mbox_hdr_add_status_2_WORD word7 +#define LPFC_ADD_STATUS_2_INCOMPAT_FLASH 0x01 +#define LPFC_ADD_STATUS_2_INCORRECT_ASIC 0x02 uint32_t response_length; uint32_t actual_response_length; } response; @@ -1555,7 +1564,7 @@ struct rq_context { #define lpfc_rq_context_hdr_size_WORD word1 uint32_t word2; #define lpfc_rq_context_cq_id_SHIFT 16 -#define lpfc_rq_context_cq_id_MASK 0x000003FF +#define lpfc_rq_context_cq_id_MASK 0x0000FFFF #define lpfc_rq_context_cq_id_WORD word2 #define lpfc_rq_context_buf_size_SHIFT 0 #define lpfc_rq_context_buf_size_MASK 0x0000FFFF @@ -3328,17 +3337,20 @@ struct lpfc_sli4_parameters { #define cfg_nosr_SHIFT 9 #define cfg_nosr_MASK 0x00000001 #define cfg_nosr_WORD word19 - #define cfg_bv1s_SHIFT 10 #define cfg_bv1s_MASK 0x00000001 #define cfg_bv1s_WORD word19 -#define cfg_pvl_SHIFT 13 -#define cfg_pvl_MASK 0x00000001 -#define cfg_pvl_WORD word19 #define cfg_nsler_SHIFT 12 #define cfg_nsler_MASK 0x00000001 #define cfg_nsler_WORD word19 +#define cfg_pvl_SHIFT 13 +#define cfg_pvl_MASK 0x00000001 +#define cfg_pvl_WORD word19 + +#define cfg_pbde_SHIFT 20 +#define cfg_pbde_MASK 0x00000001 +#define cfg_pbde_WORD word19 uint32_t word20; #define cfg_max_tow_xri_SHIFT 0 @@ -3603,6 +3615,9 @@ struct lpfc_controller_attribute { #define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8 #define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff #define lpfc_cntl_attr_eprom_ver_hi_WORD word17 +#define lpfc_cntl_attr_flash_id_SHIFT 16 +#define 
lpfc_cntl_attr_flash_id_MASK 0x000000ff +#define lpfc_cntl_attr_flash_id_WORD word17 uint32_t mbx_da_struct_ver; uint32_t ep_fw_da_struct_ver; uint32_t ncsi_ver_str[3]; @@ -4707,6 +4722,7 @@ union lpfc_wqe128 { #define MAGIC_NUMBER_G6 0xFEAA0003 #define MAGIC_NUMBER_G7 0xFEAA0005 +#define MAGIC_NUMBER_G7P 0xFEAA0020 struct lpfc_grp_hdr { uint32_t size; diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h index d48414e295a0..6a90e6e53d09 100644 --- a/drivers/scsi/lpfc/lpfc_ids.h +++ b/drivers/scsi/lpfc/lpfc_ids.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -118,6 +118,8 @@ const struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7_FC, PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G7P_FC, + PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK, PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF, diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 5983e05b648f..2c0aaa0a301d 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1852,6 +1852,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, { int rc; uint32_t intr_mode; + LPFC_MBOXQ_t *mboxq; if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= LPFC_SLI_INTF_IF_TYPE_2) { @@ -1871,11 +1872,19 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, "Recovery...\n"); /* If we are no wait, the HBA has been reset and is not - * functional, thus we should clear LPFC_SLI_ACTIVE flag. + * functional, thus we should clear + * (LPFC_SLI_ACTIVE | LPFC_SLI_MBOX_ACTIVE) flags. */ if (mbx_action == LPFC_MBX_NO_WAIT) { spin_lock_irq(&phba->hbalock); phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; + if (phba->sli.mbox_active) { + mboxq = phba->sli.mbox_active; + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + } spin_unlock_irq(&phba->hbalock); } @@ -2590,6 +2599,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) case PCI_DEVICE_ID_LANCER_G7_FC: m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"}; break; + case PCI_DEVICE_ID_LANCER_G7P_FC: + m = (typeof(m)){"LPe38000", "PCIe", "Fibre Channel Adapter"}; + break; case PCI_DEVICE_ID_SKYHAWK: case PCI_DEVICE_ID_SKYHAWK_VF: oneConnect = 1; @@ -3541,6 +3553,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; spin_unlock_irq(&ndlp->lock); + + lpfc_unreg_rpi(vports[i], ndlp); /* * Whenever an SLI4 port goes offline, free the * RPI. 
Get a new RPI when the adapter port @@ -3556,7 +3570,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } - lpfc_unreg_rpi(vports[i], ndlp); if (ndlp->nlp_type & NLP_FABRIC) { lpfc_disc_state_machine(vports[i], ndlp, @@ -4666,6 +4679,8 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) if (phba->hba_flag & HBA_FCOE_MODE) return; + if (phba->lmt & LMT_256Gb) + fc_host_supported_speeds(shost) |= FC_PORTSPEED_256GBIT; if (phba->lmt & LMT_128Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; if (phba->lmt & LMT_64Gb) @@ -4845,7 +4860,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) /** * lpfc_vmid_poll - VMID timeout detection - * @ptr: Map to lpfc_hba data structure pointer. + * @t: Timer context used to obtain the pointer to lpfc hba data structure. * * This routine is invoked when there is no I/O on by a VM for the specified * amount of time. When this situation is detected, the VMID has to be @@ -5074,6 +5089,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, case LPFC_FC_LA_SPEED_128G: port_speed = 128000; break; + case LPFC_FC_LA_SPEED_256G: + port_speed = 256000; + break; default: port_speed = 0; } @@ -8537,9 +8555,12 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) } /* FW supports persistent topology - override module parameter value */ phba->hba_flag |= HBA_PERSISTENT_TOPO; - switch (phba->pcidev->device) { - case PCI_DEVICE_ID_LANCER_G7_FC: - case PCI_DEVICE_ID_LANCER_G6_FC: + + /* if ASIC_GEN_NUM >= 0xC) */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_6) || + (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_FAMILY_G6)) { if (!tf) { phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) ? FLAGS_TOPOLOGY_MODE_LOOP @@ -8547,8 +8568,7 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) } else { phba->hba_flag &= ~HBA_PERSISTENT_TOPO; } - break; - default: /* G5 */ + } else { /* G5 */ if (tf) { /* If topology failover set - pt is '0' or '1' */ phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : @@ -8558,7 +8578,6 @@ lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) ? 
FLAGS_TOPOLOGY_MODE_PT_PT : FLAGS_TOPOLOGY_MODE_LOOP); } - break; } if (phba->hba_flag & HBA_PERSISTENT_TOPO) { lpfc_printf_log(phba, KERN_INFO, LOG_SLI, @@ -12241,7 +12260,6 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) bf_get(cfg_xib, mbx_sli4_parameters), phba->cfg_enable_fc4_type); fcponly: - phba->nvme_support = 0; phba->nvmet_support = 0; phba->cfg_nvmet_mrq = 0; phba->cfg_nvme_seg_cnt = 0; @@ -12259,9 +12277,10 @@ fcponly: if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; - /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ - if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != - LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) + /* Enable embedded Payload BDE if support is indicated */ + if (bf_get(cfg_pbde, mbx_sli4_parameters)) + phba->cfg_enable_pbde = 1; + else phba->cfg_enable_pbde = 0; /* @@ -12299,7 +12318,7 @@ fcponly: "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", bf_get(cfg_xib, mbx_sli4_parameters), phba->cfg_enable_pbde, - phba->fcp_embed_io, phba->nvme_support, + phba->fcp_embed_io, sli4_params->nvme, phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == @@ -12978,7 +12997,9 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, const struct firmware *fw) { int rc; + u8 sli_family; + sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); /* Three cases: (1) FW was not supported on the detected adapter. * (2) FW update has been locked out administratively. * (3) Some other error during FW update. @@ -12986,10 +13007,12 @@ lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, * for admin diagnosis. */ if (offset == ADD_STATUS_FW_NOT_SUPPORTED || - (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && + (sli_family == LPFC_SLI_INTF_FAMILY_G6 && magic_number != MAGIC_NUMBER_G6) || - (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && - magic_number != MAGIC_NUMBER_G7)) { + (sli_family == LPFC_SLI_INTF_FAMILY_G7 && + magic_number != MAGIC_NUMBER_G7) || + (sli_family == LPFC_SLI_INTF_FAMILY_G7P && + magic_number != MAGIC_NUMBER_G7P)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3030 This firmware version is not supported on" " this HBA model. Device:%x Magic:%x Type:%x " @@ -14040,17 +14063,18 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba) void lpfc_sli4_ras_init(struct lpfc_hba *phba) { - switch (phba->pcidev->device) { - case PCI_DEVICE_ID_LANCER_G6_FC: - case PCI_DEVICE_ID_LANCER_G7_FC: + /* if ASIC_GEN_NUM >= 0xC) */ + if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_IF_TYPE_6) || + (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) == + LPFC_SLI_INTF_FAMILY_G6)) { phba->ras_fwlog.ras_hwsupport = true; if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) && phba->cfg_ras_fwlog_buffsize) phba->ras_fwlog.ras_enabled = true; else phba->ras_fwlog.ras_enabled = false; - break; - default: + } else { phba->ras_fwlog.ras_hwsupport = false; } } @@ -14163,8 +14187,9 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba) unsigned int temp_idx; int i; int j = 0; - unsigned long rem_nsec; - struct lpfc_vport **vports; + unsigned long rem_nsec, iflags; + bool log_verbose = false; + struct lpfc_vport *port_iterator; /* Don't dump messages if we explicitly set log_verbose for the * physical port or any vport. 
@@ -14172,16 +14197,24 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba) if (phba->cfg_log_verbose) return; - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) { - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { - if (vports[i]->cfg_log_verbose) { - lpfc_destroy_vport_work_array(phba, vports); + spin_lock_irqsave(&phba->port_list_lock, iflags); + list_for_each_entry(port_iterator, &phba->port_list, listentry) { + if (port_iterator->load_flag & FC_UNLOADING) + continue; + if (scsi_host_get(lpfc_shost_from_vport(port_iterator))) { + if (port_iterator->cfg_log_verbose) + log_verbose = true; + + scsi_host_put(lpfc_shost_from_vport(port_iterator)); + + if (log_verbose) { + spin_unlock_irqrestore(&phba->port_list_lock, + iflags); return; } } } - lpfc_destroy_vport_work_array(phba, vports); + spin_unlock_irqrestore(&phba->port_list_lock, iflags); if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0) return; diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 84bc373190d8..6c754ee96bee 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -513,8 +513,9 @@ lpfc_init_link(struct lpfc_hba * phba, break; } - if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || - phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + /* Topology handling for ASIC_GEN_NUM 0xC and later */ + if ((phba->sli4_hba.pc_sli4_params.sli_family == LPFC_SLI_INTF_FAMILY_G6 || + phba->sli4_hba.pc_sli4_params.if_type == LPFC_SLI_INTF_IF_TYPE_6) && !(phba->sli4_hba.pc_sli4_params.pls) && mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) { mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index e12f83fb795c..27263f02ab9f 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -736,9 +736,13 @@ out: * is already in MAPPED or UNMAPPED state. Catch this * condition and don't set the nlp_state again because * it causes an unnecessary transport unregister/register. 
+ * + * Nodes marked for ADISC will move MAPPED or UNMAPPED state + * after issuing ADISC */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { - if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) + if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && + !(ndlp->nlp_flag & NLP_NPR_ADISC)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } @@ -863,6 +867,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } out: + /* Unregister from backend, could have been skipped due to ADISC */ + lpfc_nlp_unreg_node(vport, ndlp); + ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -1677,9 +1684,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, spin_unlock_irq(&ndlp->lock); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; - memset(&ndlp->nlp_nodename, 0, sizeof(struct lpfc_name)); - memset(&ndlp->nlp_portname, 0, sizeof(struct lpfc_name)); - ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); lpfc_unreg_rpi(vport, ndlp); @@ -2597,13 +2601,14 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport, void *arg, uint32_t evt) { + lpfc_disc_set_adisc(vport, ndlp); + ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); - lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2645,14 +2650,13 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - if (ndlp->nlp_flag & NLP_NPR_ADISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - spin_unlock_irq(&ndlp->lock); - lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(vport, ndlp, 0); - } else { + /* + * ADISC nodes will be handled in regular discovery path after + * receiving response from NS. + * + * For other nodes, Send PLOGI to trigger an implicit LOGO. + */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2685,12 +2689,13 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { - if (ndlp->nlp_flag & NLP_NPR_ADISC) { - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - ndlp->nlp_prev_state = NLP_STE_NPR_NODE; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE); - lpfc_issue_els_adisc(vport, ndlp, 0); - } else { + /* + * ADISC nodes will be handled in regular discovery path after + * receiving response from NS. + * + * For other nodes, Send PLOGI to trigger an implicit LOGO. + */ + if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index bcc804cefd30..f36294e9b5dd 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -216,8 +216,8 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) /* The register rebind might have occurred before the delete * downcall. Guard against this race. 
*/ - if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG) - ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD); + if (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT) + ndlp->fc4_xpt_flags &= ~(NVME_XPT_UNREG_WAIT | NVME_XPT_REGD); spin_unlock_irq(&ndlp->lock); @@ -2324,7 +2324,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * race that leaves the WAIT flag set. */ spin_lock_irq(&ndlp->lock); - ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG; + ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; ndlp->fc4_xpt_flags |= NVME_XPT_REGD; spin_unlock_irq(&ndlp->lock); rport = remote_port->private; @@ -2336,7 +2336,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) */ spin_lock_irq(&ndlp->lock); ndlp->nrport = NULL; - ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG; + ndlp->fc4_xpt_flags &= ~NVME_XPT_UNREG_WAIT; spin_unlock_irq(&ndlp->lock); rport->ndlp = NULL; rport->remoteport = NULL; @@ -2488,7 +2488,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * The transport will update it. */ spin_lock_irq(&vport->phba->hbalock); - ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG; + ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT; spin_unlock_irq(&vport->phba->hbalock); /* Don't let the host nvme transport keep sending keep-alives diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index 69a5a844c69c..f61223fbe644 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -37,8 +37,8 @@ #define LPFC_MAX_NVME_INFO_TMP_LEN 100 #define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n" -#define lpfc_ndlp_get_nrport(ndlp) \ - ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)) \ +#define lpfc_ndlp_get_nrport(ndlp) \ + ((!ndlp->nrport || (ndlp->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))\ ? NULL : ndlp->nrport) struct lpfc_nvme_qhandle { diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index f2d9a3580887..6e3dd0b9bcfa 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1797,19 +1797,22 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, if (ctxp->ctxbuf->sglq->sli4_xritag != xri) continue; - spin_lock(&ctxp->ctxlock); + spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, + iflag); + + spin_lock_irqsave(&ctxp->ctxlock, iflag); /* Check if we already received a free context call * and we have completed processing an abort situation. 
*/ if (ctxp->flag & LPFC_NVME_CTX_RLS && !(ctxp->flag & LPFC_NVME_ABORT_OP)) { + spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock); list_del_init(&ctxp->list); + spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock); released = true; } ctxp->flag &= ~LPFC_NVME_XBUSY; - spin_unlock(&ctxp->ctxlock); - spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, - iflag); + spin_unlock_irqrestore(&ctxp->ctxlock, iflag); rrq_empty = list_empty(&phba->active_rrq_list); ndlp = lpfc_findnode_did(phba->pport, ctxp->sid); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1b248c237be1..f905a53d050f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -683,7 +683,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, cpu = raw_smp_processor_id(); if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { - tag = blk_mq_unique_tag(cmnd->request); + tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); idx = blk_mq_unique_tag_to_hwq(tag); } else { idx = phba->sli4_hba.cpu_map[cpu].hdwq; @@ -1046,7 +1046,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, return 0; sgpe = scsi_prot_sglist(sc); - lba = t10_pi_ref_tag(sc->request); + lba = t10_pi_ref_tag(scsi_cmd_to_rq(sc)); if (lba == LPFC_INVALID_REFTAG) return 0; @@ -1629,7 +1629,7 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, goto out; /* extract some info from the scsi command for pde*/ - reftag = t10_pi_ref_tag(sc->request); + reftag = t10_pi_ref_tag(scsi_cmd_to_rq(sc)); if (reftag == LPFC_INVALID_REFTAG) goto out; @@ -1792,7 +1792,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command */ blksize = lpfc_cmd_blksize(sc); - reftag = t10_pi_ref_tag(sc->request); + reftag = t10_pi_ref_tag(scsi_cmd_to_rq(sc)); if (reftag == LPFC_INVALID_REFTAG) goto out; @@ -2023,7 +2023,7 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, goto out; /* extract some info from the scsi command for pde*/ - reftag = t10_pi_ref_tag(sc->request); + reftag = t10_pi_ref_tag(scsi_cmd_to_rq(sc)); if (reftag == LPFC_INVALID_REFTAG) goto out; @@ -2224,7 +2224,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* extract some info from the scsi command */ blksize = lpfc_cmd_blksize(sc); - reftag = t10_pi_ref_tag(sc->request); + reftag = t10_pi_ref_tag(scsi_cmd_to_rq(sc)); if (reftag == LPFC_INVALID_REFTAG) goto out; @@ -2818,7 +2818,7 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) chk_guard = 1; src = (struct scsi_dif_tuple *)sg_virt(sgpe); - start_ref_tag = t10_pi_ref_tag(cmd->request); + start_ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(cmd)); if (start_ref_tag == LPFC_INVALID_REFTAG) goto out; start_app_tag = src->app_tag; @@ -2910,7 +2910,7 @@ out: phba->bg_guard_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", - t10_pi_ref_tag(cmd->request), + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), sum, guard_tag); } else if (err_type == BGS_REFTAG_ERR_MASK) { @@ -2920,7 +2920,7 @@ out: phba->bg_reftag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9066 BLKGRD: reftag %x ref_tag err %x != %x\n", - t10_pi_ref_tag(cmd->request), + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), ref_tag, start_ref_tag); } else if (err_type == BGS_APPTAG_ERR_MASK) { @@ -2930,7 +2930,7 @@ out: phba->bg_apptag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9041 BLKGRD: reftag %x app_tag err 
%x != %x\n", - t10_pi_ref_tag(cmd->request), + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), app_tag, start_app_tag); } } @@ -2992,7 +2992,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -3007,7 +3007,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -3022,7 +3022,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -3066,7 +3066,7 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, " 0x%x lba 0x%llx blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); /* Calcuate what type of error it was */ lpfc_calc_bg_err(phba, lpfc_cmd); @@ -3103,8 +3103,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9072 BLKGRD: Invalid BG Profile in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); ret = (-1); goto out; } @@ -3115,8 +3115,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9073 BLKGRD: Invalid BG PDIF Block in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); ret = (-1); goto out; } @@ -3131,8 +3131,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9055 BLKGRD: Guard Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -3146,8 +3146,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9056 BLKGRD: Ref Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -3161,8 +3161,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9061 BLKGRD: App Tag error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), + 
blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -3205,8 +3205,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, "9057 BLKGRD: Unknown error in cmd " "0x%x reftag 0x%x blk cnt 0x%x " "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], - t10_pi_ref_tag(cmd->request), - blk_rq_sectors(cmd->request), bgstat, bghm); + t10_pi_ref_tag(scsi_cmd_to_rq(cmd)), + blk_rq_sectors(scsi_cmd_to_rq(cmd)), bgstat, bghm); /* Calcuate what type of error it was */ lpfc_calc_bg_err(phba, lpfc_cmd); @@ -5029,12 +5029,8 @@ lpfc_check_pci_resettable(struct lpfc_hba *phba) } /* Check for valid Emulex Device ID */ - switch (ptr->device) { - case PCI_DEVICE_ID_LANCER_FC: - case PCI_DEVICE_ID_LANCER_G6_FC: - case PCI_DEVICE_ID_LANCER_G7_FC: - break; - default: + if (phba->sli_rev != LPFC_SLI_REV4 || + phba->hba_flag & HBA_FCOE_MODE) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "8347 Incapable PCI reset device: " "0x%04x\n", ptr->device); @@ -5423,13 +5419,9 @@ static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct */ static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) { - char *uuid = NULL; + struct bio *bio = scsi_cmd_to_rq(cmd)->bio; - if (cmd->request) { - if (cmd->request->bio) - uuid = blkcg_get_fc_appid(cmd->request->bio); - } - return uuid; + return bio ? blkcg_get_fc_appid(bio) : NULL; } /** @@ -5557,8 +5549,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) "reftag x%x cnt %u pt %x\n", dif_op_str[scsi_get_prot_op(cmnd)], cmnd->cmnd[0], - t10_pi_ref_tag(cmnd->request), - blk_rq_sectors(cmnd->request), + t10_pi_ref_tag(scsi_cmd_to_rq(cmnd)), + blk_rq_sectors(scsi_cmd_to_rq(cmnd)), (cmnd->cmnd[1]>>5)); } err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); @@ -5569,8 +5561,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) "9038 BLKGRD: rcvd PROT_NORMAL cmd: " "x%x reftag x%x cnt %u pt %x\n", cmnd->cmnd[0], - t10_pi_ref_tag(cmnd->request), - blk_rq_sectors(cmnd->request), + t10_pi_ref_tag(scsi_cmd_to_rq(cmnd)), + blk_rq_sectors(scsi_cmd_to_rq(cmnd)), (cmnd->cmnd[1]>>5)); } err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); @@ -5641,8 +5633,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) bf_get(wqe_tmo, &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) : lpfc_cmd->cur_iocbq.iocb.ulpTimeout, - (uint32_t) - (cmnd->request->timeout / 1000)); + (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000)); goto out_host_busy_free_buf; } @@ -6273,6 +6264,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_scsi_event_header scsi_event; int status; u32 logit = LOG_FCP; + u32 dev_loss_tmo = vport->cfg_devloss_tmo; unsigned long flags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); @@ -6314,39 +6306,44 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id, FCP_TARGET_RESET); - if (status != SUCCESS) - logit = LOG_TRACE_EVENT; - spin_lock_irqsave(&pnode->lock, flags); - if (status != SUCCESS && - (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) && - !pnode->logo_waitq) { - pnode->logo_waitq = &waitq; - pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - pnode->nlp_flag |= NLP_ISSUE_LOGO; - pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; - spin_unlock_irqrestore(&pnode->lock, flags); - lpfc_unreg_rpi(vport, pnode); - wait_event_timeout(waitq, - (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)), - msecs_to_jiffies(vport->cfg_devloss_tmo * - 1000)); - - if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - 
"0725 SCSI layer TGTRST failed & LOGO TMO " - " (%d, %llu) return x%x\n", tgt_id, - lun_id, status); - spin_lock_irqsave(&pnode->lock, flags); - pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; + if (status != SUCCESS) { + logit = LOG_TRACE_EVENT; + + /* Issue LOGO, if no LOGO is outstanding */ + spin_lock_irqsave(&pnode->lock, flags); + if (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO) && + !pnode->logo_waitq) { + pnode->logo_waitq = &waitq; + pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; + pnode->nlp_flag |= NLP_ISSUE_LOGO; + pnode->upcall_flags |= NLP_WAIT_FOR_LOGO; + spin_unlock_irqrestore(&pnode->lock, flags); + lpfc_unreg_rpi(vport, pnode); + wait_event_timeout(waitq, + (!(pnode->upcall_flags & + NLP_WAIT_FOR_LOGO)), + msecs_to_jiffies(dev_loss_tmo * + 1000)); + + if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) { + lpfc_printf_vlog(vport, KERN_ERR, logit, + "0725 SCSI layer TGTRST " + "failed & LOGO TMO (%d, %llu) " + "return x%x\n", + tgt_id, lun_id, status); + spin_lock_irqsave(&pnode->lock, flags); + pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO; + } else { + spin_lock_irqsave(&pnode->lock, flags); + } + pnode->logo_waitq = NULL; + spin_unlock_irqrestore(&pnode->lock, flags); + status = SUCCESS; + } else { - spin_lock_irqsave(&pnode->lock, flags); + spin_unlock_irqrestore(&pnode->lock, flags); + status = FAILED; } - pnode->logo_waitq = NULL; - spin_unlock_irqrestore(&pnode->lock, flags); - status = SUCCESS; - } else { - status = FAILED; - spin_unlock_irqrestore(&pnode->lock, flags); } lpfc_printf_vlog(vport, KERN_ERR, logit, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index f76667b7da7b..3836d7f6a575 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -142,6 +142,10 @@ struct lpfc_scsicmd_bkt { #define FC_PORTSPEED_128GBIT 0x2000 #endif +#ifndef FC_PORTSPEED_256GBIT +#define FC_PORTSPEED_256GBIT 0x4000 +#endif + #define TXRDY_PAYLOAD_LEN 12 /* For sysfs/debugfs tmp string max len */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index f530d8fe7a8c..47dd13719901 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5674,16 +5674,20 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); phba->sli4_hba.lnk_info.lnk_no = bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); + phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr); + phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr); memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, sizeof(phba->BIOSVersion)); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n", + "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, " + "flash_id: x%02x, asic_rev: x%02x\n", phba->sli4_hba.lnk_info.lnk_tp, phba->sli4_hba.lnk_info.lnk_no, - phba->BIOSVersion); + phba->BIOSVersion, phba->sli4_hba.flash_id, + phba->sli4_hba.asic_rev); out_free_mboxq: if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) lpfc_sli4_mbox_cmd_free(phba, mboxq); @@ -8790,8 +8794,11 @@ static int lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mboxq; int rc = 0; unsigned long timeout = 0; + u32 sli_flag; + u8 cmd, subsys, opcode; /* Mark the asynchronous mailbox command posting as blocked */ spin_lock_irq(&phba->hbalock); @@ -8809,12 +8816,37 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) if (timeout) lpfc_sli4_process_missed_mbox_completions(phba); - /* Wait for the outstnading mailbox command to complete */ + /* Wait for the outstanding mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ msleep(2); if (time_after(jiffies, timeout)) { - /* Timeout, marked the outstanding cmd not complete */ + /* Timeout, mark the outstanding cmd not complete */ + + /* Sanity check sli.mbox_active has not completed or + * cancelled from another context during last 2ms sleep, + * so take hbalock to be sure before logging. 
+ */ + spin_lock_irq(&phba->hbalock); + if (phba->sli.mbox_active) { + mboxq = phba->sli.mbox_active; + cmd = mboxq->u.mb.mbxCommand; + subsys = lpfc_sli_config_mbox_subsys_get(phba, + mboxq); + opcode = lpfc_sli_config_mbox_opcode_get(phba, + mboxq); + sli_flag = psli->sli_flag; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2352 Mailbox command x%x " + "(x%x/x%x) sli_flag x%x could " + "not complete\n", + cmd, subsys, opcode, + sli_flag); + } else { + spin_unlock_irq(&phba->hbalock); + } + rc = 1; break; } @@ -10097,8 +10129,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); - pcmd = (uint32_t *) (((struct lpfc_dmabuf *) - iocbq->context2)->virt); if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, @@ -11619,6 +11649,7 @@ void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1; IOCB_t *irsp = &rspiocb->iocb; /* ELS cmd tag <ulpIoTag> completes */ @@ -11627,11 +11658,16 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "x%x x%x x%x\n", irsp->ulpIoTag, irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); - lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1); + /* + * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp + * if exchange is busy. + */ if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) lpfc_ct_free_iocb(phba, cmdiocb); else lpfc_els_free_iocb(phba, cmdiocb); + + lpfc_nlp_put(ndlp); } /** @@ -20021,6 +20057,91 @@ out: } /** + * lpfc_log_fw_write_cmpl - logs firmware write completion status + * @phba: pointer to lpfc hba data structure + * @shdr_status: wr_object rsp's status field + * @shdr_add_status: wr_object rsp's add_status field + * @shdr_add_status_2: wr_object rsp's add_status_2 field + * @shdr_change_status: wr_object rsp's change_status field + * @shdr_csf: wr_object rsp's csf bit + * + * This routine is intended to be called after a firmware write completes. + * It will log next action items to be performed by the user to instantiate + * the newly downloaded firmware or reason for incompatibility. 
+ **/ +static void +lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, + u32 shdr_add_status, u32 shdr_add_status_2, + u32 shdr_change_status, u32 shdr_csf) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "4198 %s: flash_id x%02x, asic_rev x%02x, " + "status x%02x, add_status x%02x, add_status_2 x%02x, " + "change_status x%02x, csf %01x\n", __func__, + phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev, + shdr_status, shdr_add_status, shdr_add_status_2, + shdr_change_status, shdr_csf); + + if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { + switch (shdr_add_status_2) { + case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4199 Firmware write failed: " + "image incompatible with flash x%02x\n", + phba->sli4_hba.flash_id); + break; + case LPFC_ADD_STATUS_2_INCORRECT_ASIC: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4200 Firmware write failed: " + "image incompatible with ASIC " + "architecture x%02x\n", + phba->sli4_hba.asic_rev); + break; + default: + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4210 Firmware write failed: " + "add_status_2 x%02x\n", + shdr_add_status_2); + break; + } + } else if (!shdr_status && !shdr_add_status) { + if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || + shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { + if (shdr_csf) + shdr_change_status = + LPFC_CHANGE_STATUS_PCI_RESET; + } + + switch (shdr_change_status) { + case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3198 Firmware write complete: System " + "reboot required to instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_FW_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3199 Firmware write complete: " + "Firmware reset required to " + "instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_PORT_MIGRATION): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3200 Firmware write complete: Port " + "Migration or PCI Reset required to " + "instantiate\n"); + break; + case (LPFC_CHANGE_STATUS_PCI_RESET): + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "3201 Firmware write complete: PCI " + "Reset required to instantiate\n"); + break; + default: + break; + } + } +} + +/** * lpfc_wr_object - write an object to the firmware * @phba: HBA structure that indicates port to create a queue on. * @dmabuf_list: list of dmabufs to write to the port. 
@@ -20046,7 +20167,8 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, struct lpfc_mbx_wr_object *wr_object; LPFC_MBOXQ_t *mbox; int rc = 0, i = 0; - uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf; + uint32_t shdr_status, shdr_add_status, shdr_add_status_2; + uint32_t shdr_change_status = 0, shdr_csf = 0; uint32_t mbox_tmo; struct lpfc_dmabuf *dmabuf; uint32_t written = 0; @@ -20100,58 +20222,36 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, &wr_object->header.cfg_shdr.response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &wr_object->header.cfg_shdr.response); + shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2, + &wr_object->header.cfg_shdr.response); if (check_change_status) { shdr_change_status = bf_get(lpfc_wr_object_change_status, &wr_object->u.response); - - if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || - shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { - shdr_csf = bf_get(lpfc_wr_object_csf, - &wr_object->u.response); - if (shdr_csf) - shdr_change_status = - LPFC_CHANGE_STATUS_PCI_RESET; - } - - switch (shdr_change_status) { - case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3198 Firmware write complete: System " - "reboot required to instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_FW_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3199 Firmware write complete: Firmware" - " reset required to instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_PORT_MIGRATION): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3200 Firmware write complete: Port " - "Migration or PCI Reset required to " - "instantiate\n"); - break; - case (LPFC_CHANGE_STATUS_PCI_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3201 Firmware write complete: PCI " - "Reset required to instantiate\n"); - break; - default: - break; - } + shdr_csf = bf_get(lpfc_wr_object_csf, + &wr_object->u.response); } + if (!phba->sli4_hba.intr_enable) mempool_free(mbox, phba->mbox_mem_pool); else if (rc != MBX_TIMEOUT) mempool_free(mbox, phba->mbox_mem_pool); - if (shdr_status || shdr_add_status || rc) { + if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3025 Write Object mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); + "status x%x add_status x%x, add_status_2 x%x, " + "mbx status x%x\n", + shdr_status, shdr_add_status, shdr_add_status_2, + rc); rc = -ENXIO; *offset = shdr_add_status; - } else + } else { *offset += wr_object->u.response.actual_write_length; + } + + if (rc || check_change_status) + lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status, + shdr_add_status_2, shdr_change_status, + shdr_csf); return rc; } diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 26f19c95380f..f250b666ac57 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2009-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -978,6 +978,8 @@ struct lpfc_sli4_hba { #define lpfc_conf_trunk_port3_nd_WORD conf_trunk #define lpfc_conf_trunk_port3_nd_SHIFT 7 #define lpfc_conf_trunk_port3_nd_MASK 0x1 + uint8_t flash_id; + uint8_t asic_rev; }; enum lpfc_sge_type { diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 2d62fd2a9824..73a5b3bbdacd 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.8.0.10" +#define LPFC_DRIVER_VERSION "14.0.0.0" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index ec10b2497310..e4298bf4a482 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1451,10 +1451,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, * pthru timeout to the os layer timeout value. */ if (scp->device->type == TYPE_TAPE) { - if ((scp->request->timeout / HZ) > 0xFFFF) + if (scsi_cmd_to_rq(scp)->timeout / HZ > 0xFFFF) pthru->timeout = cpu_to_le16(0xFFFF); else - pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); + pthru->timeout = cpu_to_le16(scsi_cmd_to_rq(scp)->timeout / HZ); } /* diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 06399c026a8d..26d0cf9353dd 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -402,7 +402,7 @@ megasas_get_msix_index(struct megasas_instance *instance, (mega_mod64(atomic64_add_return(1, &instance->total_io_count), instance->msix_vectors)); } else if (instance->host->nr_hw_queues > 1) { - u32 tag = blk_mq_unique_tag(scmd->request); + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) + instance->low_latency_index_start; @@ -3023,7 +3023,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance, io_request->DevHandle = cpu_to_le16(device_id); io_request->LUN[1] = scmd->device->lun; pRAID_Context->timeout_value = - cpu_to_le16 (scmd->request->timeout / HZ); + cpu_to_le16(scsi_cmd_to_rq(scmd)->timeout / HZ); cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); @@ -3086,7 +3086,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, device_id = MEGASAS_DEV_INDEX(scmd); pd_index = MEGASAS_PD_INDEX(scmd); - os_timeout_value = scmd->request->timeout / HZ; + os_timeout_value = scsi_cmd_to_rq(scmd)->timeout / HZ; mr_device_priv_data = scmd->device->hostdata; cmd->pd_interface = mr_device_priv_data->interface_type; @@ -3381,7 +3381,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, return SCSI_MLQUEUE_HOST_BUSY; } - cmd = megasas_get_cmd_fusion(instance, scmd->request->tag); + cmd = megasas_get_cmd_fusion(instance, scsi_cmd_to_rq(scmd)->tag); if (!cmd) { atomic_dec(&instance->fw_outstanding); @@ -3422,7 +3422,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, */ if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { r1_cmd = megasas_get_cmd_fusion(instance, - (scmd->request->tag + instance->max_fw_cmds)); + scsi_cmd_to_rq(scmd)->tag + instance->max_fw_cmds); megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd); } diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c 
b/drivers/scsi/mpi3mr/mpi3mr_os.c index 24ac7ddec749..bc1c32f599de 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -50,7 +50,7 @@ static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc, u32 unique_tag; u16 host_tag, hw_queue; - unique_tag = blk_mq_unique_tag(scmd->request); + unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); hw_queue = blk_mq_unique_tag_to_hwq(unique_tag); if (hw_queue >= mrioc->num_op_reply_q) @@ -2016,7 +2016,7 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, case SCSI_PROT_DIF_TYPE0: eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG; scsiio_req->cdb.eedp32.primary_reference_tag = - cpu_to_be32(t10_pi_ref_tag(scmd->request)); + cpu_to_be32(t10_pi_ref_tag(scsi_cmd_to_rq(scmd))); break; case SCSI_PROT_DIF_TYPE1: case SCSI_PROT_DIF_TYPE2: @@ -2024,7 +2024,7 @@ static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc, MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE | MPI3_EEDPFLAGS_CHK_GUARD; scsiio_req->cdb.eedp32.primary_reference_tag = - cpu_to_be32(t10_pi_ref_tag(scmd->request)); + cpu_to_be32(t10_pi_ref_tag(scsi_cmd_to_rq(scmd))); break; case SCSI_PROT_DIF_TYPE3: eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD | @@ -3451,7 +3451,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost, u16 dev_handle; u16 host_tag; u32 scsiio_flags = 0; - struct request *rq = scmd->request; + struct request *rq = scsi_cmd_to_rq(scmd); int iprio_class; sdev_priv_data = scmd->device->hostdata; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 19b1c0cf5f2a..2528c4e7b0e0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -116,6 +116,14 @@ MODULE_PARM_DESC(perf_mode, "\t\tdefault - default perf_mode is 'balanced'" ); +static int poll_queues; +module_param(poll_queues, int, 0444); +MODULE_PARM_DESC(poll_queues, "Number of queues to be use for io_uring poll mode.\n\t\t" + "This parameter is effective only if host_tagset_enable=1. &\n\t\t" + "when poll_queues are enabled then &\n\t\t" + "perf_mode is set to latency mode. &\n\t\t" + ); + enum mpt3sas_perf_mode { MPT_PERF_MODE_DEFAULT = -1, MPT_PERF_MODE_BALANCED = 0, @@ -709,6 +717,7 @@ _base_fault_reset_work(struct work_struct *work) * and this call is safe since dead ioc will never return any * command back from HW. */ + mpt3sas_base_pause_mq_polling(ioc); ioc->schedule_dead_ioc_flush_running_cmds(ioc); /* * Set remove_host flag early since kernel thread will @@ -744,6 +753,7 @@ _base_fault_reset_work(struct work_struct *work) spin_unlock_irqrestore( &ioc->ioc_reset_in_progress_lock, flags); mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_pause_mq_polling(ioc); _base_clear_outstanding_commands(ioc); } @@ -1548,6 +1558,53 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) } /** + * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues + * when driver is flushing out the IOs. + * @ioc: per adapter object + * + * Pause polling on the mq poll (io uring) queues when driver is flushing + * out the IOs. Otherwise we may see the race condition of completing the same + * IO from two paths. + * + * Returns nothing. + */ +void +mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc) +{ + int iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1); + + /* + * wait for current poll to complete. 
+ */ + for (qid = 0; qid < iopoll_q_count; qid++) { + while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) + udelay(500); + } +} + +/** + * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues. + * @ioc: per adapter object + * + * Returns nothing. + */ +void +mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc) +{ + int iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + int qid; + + for (qid = 0; qid < iopoll_q_count; qid++) + atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0); +} + +/** * mpt3sas_base_mask_interrupts - disable interrupts * @ioc: per adapter object * @@ -1722,7 +1779,8 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex); } - if (!reply_q->irq_poll_scheduled) { + if (!reply_q->is_iouring_poll_q && + !reply_q->irq_poll_scheduled) { reply_q->irq_poll_scheduled = true; irq_poll_sched(&reply_q->irqpoll); } @@ -1779,6 +1837,33 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q) } /** + * mpt3sas_blk_mq_poll - poll the blk mq poll queue + * @shost: Scsi_Host object + * @queue_num: hw ctx queue number + * + * Return number of entries that has been processed from poll queue. + */ +int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num) +{ + struct MPT3SAS_ADAPTER *ioc = + (struct MPT3SAS_ADAPTER *)shost->hostdata; + struct adapter_reply_queue *reply_q; + int num_entries = 0; + int qid = queue_num - ioc->iopoll_q_start_index; + + if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) || + !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1)) + return 0; + + reply_q = ioc->io_uring_poll_queues[qid].reply_q; + + num_entries = _base_process_reply_queue(reply_q); + atomic_dec(&ioc->io_uring_poll_queues[qid].busy); + + return num_entries; +} + +/** * _base_interrupt - MPT adapter (IOC) specific interrupt handler. 
* @irq: irq number (not used) * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure @@ -1851,6 +1936,8 @@ _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc) return; list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + if (reply_q->is_iouring_poll_q) + continue; irq_poll_init(&reply_q->irqpoll, ioc->hba_queue_depth/4, _base_irqpoll); reply_q->irq_poll_scheduled = false; @@ -1900,6 +1987,12 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll) /* TMs are on msix_index == 0 */ if (reply_q->msix_index == 0) continue; + + if (reply_q->is_iouring_poll_q) { + _base_process_reply_queue(reply_q); + continue; + } + synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index)); if (reply_q->irq_poll_scheduled) { /* Calling irq_poll_disable will wait for any pending @@ -2998,6 +3091,11 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { list_del(&reply_q->list); + if (reply_q->is_iouring_poll_q) { + kfree(reply_q); + continue; + } + if (ioc->smp_affinity_enable) irq_set_affinity_hint(pci_irq_vector(ioc->pdev, reply_q->msix_index), NULL); @@ -3019,7 +3117,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) { struct pci_dev *pdev = ioc->pdev; struct adapter_reply_queue *reply_q; - int r; + int r, qid; reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); if (!reply_q) { @@ -3031,6 +3129,17 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) reply_q->msix_index = index; atomic_set(&reply_q->busy, 0); + + if (index >= ioc->iopoll_q_start_index) { + qid = index - ioc->iopoll_q_start_index; + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d", + ioc->driver_name, ioc->id, qid); + reply_q->is_iouring_poll_q = 1; + ioc->io_uring_poll_queues[qid].reply_q = reply_q; + goto out; + } + + if (ioc->msix_enable) snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", ioc->driver_name, ioc->id, index); @@ -3045,7 +3154,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index) kfree(reply_q); return -EBUSY; } - +out: INIT_LIST_HEAD(&reply_q->list); list_add_tail(&reply_q->list, &ioc->reply_queue_list); return 0; @@ -3066,6 +3175,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) unsigned int cpu, nr_cpus, nr_msix, index = 0; struct adapter_reply_queue *reply_q; int local_numa_node; + int iopoll_q_count = ioc->reply_queue_count - + ioc->iopoll_q_start_index; if (!_base_is_controller_msix_enabled(ioc)) return; @@ -3099,7 +3210,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { const cpumask_t *mask; - if (reply_q->msix_index < ioc->high_iops_queues) + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) continue; mask = pci_irq_get_affinity(ioc->pdev, @@ -3121,13 +3233,14 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) fall_back: cpu = cpumask_first(cpu_online_mask); - nr_msix -= ioc->high_iops_queues; + nr_msix -= (ioc->high_iops_queues - iopoll_q_count); index = 0; list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { unsigned int i, group = nr_cpus / nr_msix; - if (reply_q->msix_index < ioc->high_iops_queues) + if (reply_q->msix_index < ioc->high_iops_queues || + reply_q->msix_index >= ioc->iopoll_q_start_index) continue; if (cpu >= nr_cpus) @@ -3164,8 +3277,12 @@ _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc, { u16 lnksta, speed; + /* + * Disable high iops queues if io uring poll queues 
are enabled. + */ if (perf_mode == MPT_PERF_MODE_IOPS || - perf_mode == MPT_PERF_MODE_LATENCY) { + perf_mode == MPT_PERF_MODE_LATENCY || + ioc->io_uring_poll_queues) { ioc->high_iops_queues = 0; return; } @@ -3202,6 +3319,7 @@ mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) return; pci_free_irq_vectors(ioc->pdev); ioc->msix_enable = 0; + kfree(ioc->io_uring_poll_queues); } /** @@ -3215,18 +3333,24 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc) int i, irq_flags = PCI_IRQ_MSIX; struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues }; struct irq_affinity *descp = &desc; + /* + * Don't allocate msix vectors for poll_queues. + * msix_vectors is always within a range of FW supported reply queue. + */ + int nr_msix_vectors = ioc->iopoll_q_start_index; + if (ioc->smp_affinity_enable) - irq_flags |= PCI_IRQ_AFFINITY; + irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES; else descp = NULL; - ioc_info(ioc, " %d %d\n", ioc->high_iops_queues, - ioc->reply_queue_count); + ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues, + ioc->reply_queue_count, nr_msix_vectors); i = pci_alloc_irq_vectors_affinity(ioc->pdev, ioc->high_iops_queues, - ioc->reply_queue_count, irq_flags, descp); + nr_msix_vectors, irq_flags, descp); return i; } @@ -3242,6 +3366,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) int r; int i, local_max_msix_vectors; u8 try_msix = 0; + int iopoll_q_count = 0; ioc->msix_load_balance = false; @@ -3257,22 +3382,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count); pr_info("\t no of cores: %d, max_msix_vectors: %d\n", ioc->cpu_count, max_msix_vectors); - if (ioc->is_aero_ioc) - _base_check_and_enable_high_iops_queues(ioc, - ioc->msix_vector_count); + ioc->reply_queue_count = - min_t(int, ioc->cpu_count + ioc->high_iops_queues, - ioc->msix_vector_count); + min_t(int, ioc->cpu_count, ioc->msix_vector_count); if (!ioc->rdpq_array_enable && max_msix_vectors == -1) local_max_msix_vectors = (reset_devices) ? 1 : 8; else local_max_msix_vectors = max_msix_vectors; - if (local_max_msix_vectors > 0) - ioc->reply_queue_count = min_t(int, local_max_msix_vectors, - ioc->reply_queue_count); - else if (local_max_msix_vectors == 0) + if (local_max_msix_vectors == 0) goto try_ioapic; /* @@ -3293,14 +3412,77 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) if (ioc->msix_load_balance) ioc->smp_affinity_enable = 0; + if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1) + ioc->shost->host_tagset = 0; + + /* + * Enable io uring poll queues only if host_tagset is enabled. + */ + if (ioc->shost->host_tagset) + iopoll_q_count = poll_queues; + + if (iopoll_q_count) { + ioc->io_uring_poll_queues = kcalloc(iopoll_q_count, + sizeof(struct io_uring_poll_queue), GFP_KERNEL); + if (!ioc->io_uring_poll_queues) + iopoll_q_count = 0; + } + + if (ioc->is_aero_ioc) + _base_check_and_enable_high_iops_queues(ioc, + ioc->msix_vector_count); + + /* + * Add high iops queues count to reply queue count if high iops queues + * are enabled. + */ + ioc->reply_queue_count = min_t(int, + ioc->reply_queue_count + ioc->high_iops_queues, + ioc->msix_vector_count); + + /* + * Adjust the reply queue count incase reply queue count + * exceeds the user provided MSIx vectors count. + */ + if (local_max_msix_vectors > 0) + ioc->reply_queue_count = min_t(int, local_max_msix_vectors, + ioc->reply_queue_count); + /* + * Add io uring poll queues count to reply queues count + * if io uring is enabled in driver. 
+ */ + if (iopoll_q_count) { + if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS)) + iopoll_q_count = 0; + ioc->reply_queue_count = min_t(int, + ioc->reply_queue_count + iopoll_q_count, + ioc->msix_vector_count); + } + + /* + * Starting index of io uring poll queues in reply queue list. + */ + ioc->iopoll_q_start_index = + ioc->reply_queue_count - iopoll_q_count; + r = _base_alloc_irq_vectors(ioc); if (r < 0) { ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r); goto try_ioapic; } + /* + * Adjust the reply queue count if the allocated + * MSIx vectors is less then the requested number + * of MSIx vectors. + */ + if (r < ioc->iopoll_q_start_index) { + ioc->reply_queue_count = r + iopoll_q_count; + ioc->iopoll_q_start_index = + ioc->reply_queue_count - iopoll_q_count; + } + ioc->msix_enable = 1; - ioc->reply_queue_count = r; for (i = 0; i < ioc->reply_queue_count; i++) { r = _base_request_irq(ioc, i); if (r) { @@ -3320,6 +3502,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) ioc->high_iops_queues = 0; ioc_info(ioc, "High IOPs queues : disabled\n"); ioc->reply_queue_count = 1; + ioc->iopoll_q_start_index = ioc->reply_queue_count - 0; r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY); if (r < 0) { dfailprintk(ioc, @@ -3416,6 +3599,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) u64 pio_chip = 0; phys_addr_t chip_phys = 0; struct adapter_reply_queue *reply_q; + int iopoll_q_count = 0; dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -3489,6 +3673,12 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) if (r) goto out_fail; + iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index; + for (i = 0; i < iopoll_q_count; i++) { + atomic_set(&ioc->io_uring_poll_queues[i].busy, 0); + atomic_set(&ioc->io_uring_poll_queues[i].pause, 0); + } + if (!ioc->is_driver_loading) _base_init_irqpolls(ioc); /* Use the Combined reply queue feature only for SAS3 C0 & higher @@ -3530,11 +3720,18 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) * 4))); } - list_for_each_entry(reply_q, &ioc->reply_queue_list, list) + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (reply_q->msix_index >= ioc->iopoll_q_start_index) { + pr_info("%s: enabled: index: %d\n", + reply_q->name, reply_q->msix_index); + continue; + } + pr_info("%s: %s enabled: IRQ %d\n", reply_q->name, ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC", pci_irq_vector(ioc->pdev, reply_q->msix_index)); + } ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n", &chip_phys, ioc->chip, memap_sz); @@ -3651,7 +3848,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc, &ioc->total_io_cnt), ioc->reply_queue_count) : 0; if (scmd && ioc->shost->nr_hw_queues > 1) { - u32 tag = blk_mq_unique_tag(scmd->request); + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); return blk_mq_unique_tag_to_hwq(tag) + ioc->high_iops_queues; @@ -3735,7 +3932,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, u16 smid; u32 tag, unique_tag; - unique_tag = blk_mq_unique_tag(scmd->request); + unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); tag = blk_mq_unique_tag_to_tag(unique_tag); /* @@ -5169,6 +5366,73 @@ _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc) } /** + * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices. + * - On failure set default QD values. + * @ioc : per adapter object + * + * Returns 0 for success, non-zero for failure. 
+ * + */ +static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; + int sz; + int rc = 0; + + ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH; + ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH; + if (!ioc->is_gen35_ioc) + goto out; + /* sas iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return rc; + } + rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz); + if (rc) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->max_wideport_qd = + (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ? + le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) : + MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_narrowport_qd = + (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ? + le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) : + MPT3SAS_SAS_QUEUE_DEPTH; + ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ? + sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH; + /* pcie iounit page 1 */ + rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, + &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); + if (rc) { + pr_err("%s: failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ? + (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) : + MPT3SAS_NVME_QUEUE_DEPTH; +out: + dinitprintk(ioc, pr_err( + "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n", + ioc->max_wideport_qd, ioc->max_narrowport_qd, + ioc->max_sata_qd, ioc->max_nvme_qd)); + kfree(sas_iounit_pg1); + return rc; +} + +/** * _base_static_config_pages - static start of day config pages * @ioc: per adapter object */ @@ -5237,6 +5501,9 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) ioc_warn(ioc, "TimeSync Interval in Manuf page-11 is not enabled. 
Periodic Time-Sync will be disabled\n"); } + rc = _base_assign_fw_reported_qd(ioc); + if (rc) + return rc; rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); if (rc) return rc; @@ -8471,6 +8738,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, _base_pre_reset_handler(ioc); mpt3sas_wait_for_commands_to_complete(ioc); mpt3sas_base_mask_interrupts(ioc); + mpt3sas_base_pause_mq_polling(ioc); r = mpt3sas_base_make_ioc_ready(ioc, type); if (r) goto out; @@ -8512,6 +8780,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); ioc->ioc_reset_count++; mutex_unlock(&ioc->reset_in_progress_mutex); + mpt3sas_base_resume_mq_polling(ioc); out_unlocked: if ((r == 0) && is_trigger) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 0c6c3df0038d..f87c0911f66a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -77,9 +77,9 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "37.101.00.00" -#define MPT3SAS_MAJOR_VERSION 37 -#define MPT3SAS_MINOR_VERSION 101 +#define MPT3SAS_DRIVER_VERSION "39.100.00.00" +#define MPT3SAS_MAJOR_VERSION 39 +#define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -354,6 +354,7 @@ struct mpt3sas_nvme_cmd { #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16 #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) +#define MPT3_MIN_IRQS 1 /* OEM Identifiers */ #define MFG10_OEM_ID_INVALID (0x00000000) @@ -575,6 +576,7 @@ struct _sas_device { u8 is_chassis_slot_valid; u8 connector_name[5]; struct kref refcount; + u8 port_type; struct hba_port *port; struct sas_rphy *rphy; }; @@ -936,6 +938,8 @@ struct _event_ack_list { * @os_irq: irq number * @irqpoll: irq_poll object * @irq_poll_scheduled: Tells whether irq poll is scheduled or not + * @is_iouring_poll_q: Tells whether reply queues is assigned + * to io uring poll queues or not * @list: this list */ struct adapter_reply_queue { @@ -949,9 +953,22 @@ struct adapter_reply_queue { struct irq_poll irqpoll; bool irq_poll_scheduled; bool irq_line_enable; + bool is_iouring_poll_q; struct list_head list; }; +/** + * struct io_uring_poll_queue - the io uring poll queue structure + * @busy: Tells whether io uring poll queue is busy or not + * @pause: Tells whether IOs are paused on io uring poll queue or not + * @reply_q: reply queue mapped for io uring poll queue + */ +struct io_uring_poll_queue { + atomic_t busy; + atomic_t pause; + struct adapter_reply_queue *reply_q; +}; + typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); /* SAS3.0 support */ @@ -1176,6 +1193,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands * @thresh_hold: Max number of reply descriptors processed * before updating Host Index + * @iopoll_q_start_index: starting index of io uring poll queues + * in reply queue list * @drv_internal_flags: Bit map internal to driver * @drv_support_bitmap: driver's supported feature bit map * @use_32bit_dma: Flag to use 32 bit consistent dma mask @@ -1372,11 +1391,13 @@ struct MPT3SAS_ADAPTER { bool msix_load_balance; u16 thresh_hold; u8 
high_iops_queues; + u8 iopoll_q_start_index; u32 drv_internal_flags; u32 drv_support_bitmap; u32 dma_mask; bool enable_sdev_max_qd; bool use_32bit_dma; + struct io_uring_poll_queue *io_uring_poll_queues; /* internal commands, callback index */ u8 scsi_io_cb_idx; @@ -1423,6 +1444,10 @@ struct MPT3SAS_ADAPTER { u8 tm_custom_handling; u8 nvme_abort_timeout; u16 max_shutdown_latency; + u16 max_wideport_qd; + u16 max_narrowport_qd; + u16 max_nvme_qd; + u8 max_sata_qd; /* static config pages */ struct mpt3sas_facts facts; @@ -1730,10 +1755,12 @@ do { ioc_err(ioc, "In func: %s\n", __func__); \ status, mpi_request, sz); } while (0) int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count); -int -mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); +int mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type); void mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num); +void mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc); /* scsih shared API */ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, @@ -1829,6 +1856,9 @@ int mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, int mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeDevicePage2_t *config_page, u32 form, u32 handle); +int mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page, + u16 sz); int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, u16 sz); diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 83a5c2172ad4..0563078227de 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -1169,6 +1169,43 @@ out: } /** + * mpt3sas_config_get_pcie_iounit_pg1 - obtain pcie iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_pcie_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi26PCIeIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_PCIE_IO_UNIT; + mpi_request.Header.PageVersion = MPI26_PCIEIOUNITPAGE1_PAGEVERSION; + mpi_request.Header.PageNumber = 1; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); +out: + return r; +} + +/** * mpt3sas_config_get_pcie_device_pg2 - obtain pcie device page 2 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index b66140e4c370..770b241d7bb2 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -3820,9 +3820,10 @@ enable_sdev_max_qd_store(struct device *cdev, } } else if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) - qdepth = MPT3SAS_NVME_QUEUE_DEPTH; + qdepth = ioc->max_nvme_qd; else - qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + qdepth = (sas_target_priv_data->sas_dev->port_type > 1) ? + ioc->max_wideport_qd : ioc->max_narrowport_qd; mpt3sas_scsih_change_queue_depth(sdev, qdepth); } @@ -3919,6 +3920,24 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(sas_device_handle); /** + * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority + * @dev: pointer to embedded device + * @attr: sas_ncq_prio_supported attribute descriptor + * @buf: the buffer returned + * + * A sysfs 'read-only' sdev attribute, only works with SATA + */ +static ssize_t +sas_ncq_prio_supported_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev)); +} +static DEVICE_ATTR_RO(sas_ncq_prio_supported); + +/** * sas_ncq_prio_enable_show - send prioritized io commands to device * @dev: pointer to embedded device * @attr: ? @@ -3960,6 +3979,7 @@ static DEVICE_ATTR_RW(sas_ncq_prio_enable); struct device_attribute *mpt3sas_dev_attrs[] = { &dev_attr_sas_address, &dev_attr_sas_device_handle, + &dev_attr_sas_ncq_prio_supported, &dev_attr_sas_ncq_prio_enable, NULL, }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8e64a6f14542..cbc7b3d9d913 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -1803,7 +1803,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) * limit max device queue for SATA to 32 if enable_sdev_max_qd * is disabled. 
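With the fixed MPT3SAS_*_QUEUE_DEPTH constants replaced by limits read from SAS IO Unit Page 1 and PCIe IO Unit Page 1, the per-device depth is chosen by transport type and port width. A simplified sketch of that selection policy, using stand-in types rather than the driver's structures:

enum dev_kind { DEV_SSP, DEV_SATA, DEV_NVME };

struct qd_limits {
	int wideport, narrowport, sata, nvme;	/* firmware-reported maxima */
};

static int pick_qdepth(const struct qd_limits *l, enum dev_kind kind,
		       int max_port_connections)
{
	switch (kind) {
	case DEV_NVME:
		return l->nvme;
	case DEV_SATA:
		return l->sata;
	case DEV_SSP:
	default:
		/* more than one port connection means a wide port */
		return max_port_connections > 1 ? l->wideport : l->narrowport;
	}
}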
*/ - if (ioc->enable_sdev_max_qd) + if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc) goto not_sata; sas_device_priv_data = sdev->hostdata; @@ -2657,7 +2657,7 @@ scsih_slave_configure(struct scsi_device *sdev) return 1; } - qdepth = MPT3SAS_NVME_QUEUE_DEPTH; + qdepth = ioc->max_nvme_qd; ds = "NVMe"; sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n", @@ -2709,7 +2709,8 @@ scsih_slave_configure(struct scsi_device *sdev) sas_device->volume_handle = volume_handle; sas_device->volume_wwid = volume_wwid; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { - qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + qdepth = (sas_device->port_type > 1) ? + ioc->max_wideport_qd : ioc->max_narrowport_qd; ssp_target = 1; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SEP) { @@ -2721,7 +2722,7 @@ scsih_slave_configure(struct scsi_device *sdev) } else ds = "SSP"; } else { - qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + qdepth = ioc->max_sata_qd; if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) ds = "STP"; else if (sas_device->device_info & @@ -3304,7 +3305,7 @@ scsih_abort(struct scsi_cmnd *scmd) sdev_printk(KERN_INFO, scmd->device, "attempting task abort!" "scmd(0x%p), outstanding for %u ms & timeout %u ms\n", scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), - (scmd->request->timeout / HZ) * 1000); + (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; @@ -5074,7 +5075,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; mpi_request->CDB.EEDP32.PrimaryReferenceTag = - cpu_to_be32(t10_pi_ref_tag(scmd->request)); + cpu_to_be32(t10_pi_ref_tag(scsi_cmd_to_rq(scmd))); break; case SCSI_PROT_DIF_TYPE3: @@ -5141,7 +5142,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; struct _raid_device *raid_device; - struct request *rq = scmd->request; + struct request *rq = scsi_cmd_to_rq(scmd); int class; Mpi25SCSIIORequest_t *mpi_request; struct _pcie_device *pcie_device = NULL; @@ -7371,6 +7372,10 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, /* get device name */ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName); + sas_device->port_type = sas_device_pg0.MaxPortConnections; + ioc_info(ioc, + "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n", + handle, sas_device->sas_address, sas_device->port_type); if (ioc->wait_for_discovery_to_complete) _scsih_sas_device_init_add(ioc, sas_device); @@ -9604,6 +9609,42 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) } /** + * _scsih_update_device_qdepth - Update QD during Reset. 
+ * @ioc: per adapter object + * + */ +static void +_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct scsi_device *sdev; + u16 qdepth; + + ioc_info(ioc, "Update devices with firmware reported queue depth\n"); + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) { + sas_target_priv_data = sas_device_priv_data->sas_target; + sas_device = sas_device_priv_data->sas_target->sas_dev; + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) + qdepth = ioc->max_nvme_qd; + else if (sas_device && + sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = (sas_device->port_type > 1) ? + ioc->max_wideport_qd : ioc->max_narrowport_qd; + else if (sas_device && + sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + qdepth = ioc->max_sata_qd; + else + continue; + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + } + } +} + +/** * _scsih_mark_responding_sas_device - mark a sas_devices as responding * @ioc: per adapter object * @sas_device_pg0: SAS Device page 0 @@ -10654,6 +10695,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) _scsih_remove_unresponding_devices(ioc); _scsih_del_dirty_vphy(ioc); _scsih_del_dirty_port_entries(ioc); + if (ioc->is_gen35_ioc) + _scsih_update_device_qdepth(ioc); _scsih_scan_for_devices_after_reset(ioc); /* * If diag reset has occurred during the driver load @@ -11178,8 +11221,10 @@ static void scsih_remove(struct pci_dev *pdev) ioc->remove_host = 1; - if (!pci_device_is_present(pdev)) + if (!pci_device_is_present(pdev)) { + mpt3sas_base_pause_mq_polling(ioc); _scsih_flush_running_cmds(ioc); + } _scsih_fw_event_cleanup_queue(ioc); @@ -11274,8 +11319,10 @@ scsih_shutdown(struct pci_dev *pdev) ioc->remove_host = 1; - if (!pci_device_is_present(pdev)) + if (!pci_device_is_present(pdev)) { + mpt3sas_base_pause_mq_polling(ioc); _scsih_flush_running_cmds(ioc); + } _scsih_fw_event_cleanup_queue(ioc); @@ -11785,12 +11832,41 @@ static int scsih_map_queues(struct Scsi_Host *shost) { struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)shost->hostdata; + struct blk_mq_queue_map *map; + int i, qoff, offset; + int nr_msix_vectors = ioc->iopoll_q_start_index; + int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors; - if (ioc->shost->nr_hw_queues == 1) + if (shost->nr_hw_queues == 1) return 0; - return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - ioc->pdev, ioc->high_iops_queues); + for (i = 0, qoff = 0; i < shost->nr_maps; i++) { + map = &shost->tag_set.map[i]; + map->nr_queues = 0; + offset = 0; + if (i == HCTX_TYPE_DEFAULT) { + map->nr_queues = + nr_msix_vectors - ioc->high_iops_queues; + offset = ioc->high_iops_queues; + } else if (i == HCTX_TYPE_POLL) + map->nr_queues = iopoll_q_count; + + if (!map->nr_queues) + BUG_ON(i == HCTX_TYPE_DEFAULT); + + /* + * The poll queue(s) doesn't have an IRQ (and hence IRQ + * affinity), so use the regular blk-mq cpu mapping + */ + map->queue_offset = qoff; + if (i != HCTX_TYPE_POLL) + blk_mq_pci_map_queues(map, ioc->pdev, offset); + else + blk_mq_map_queues(map); + + qoff += map->nr_queues; + } + return 0; } /* shost template for SAS 2.0 HBA devices */ @@ -11861,6 +11937,7 @@ static struct scsi_host_template mpt3sas_driver_template = { .track_queue_depth = 1, .cmd_size = sizeof(struct scsiio_tracker), .map_queues = 
scsih_map_queues, + .mq_poll = mpt3sas_blk_mq_poll, }; /* raid transport support for SAS 3.0 HBA devices */ @@ -11957,6 +12034,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct Scsi_Host *shost = NULL; int rv; u16 hba_mpi_version; + int iopoll_q_count = 0; /* Determine in which MPI version class this pci device belongs */ hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); @@ -12204,6 +12282,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_thread_fail; } + shost->host_tagset = 0; + + if (ioc->is_gen35_ioc && host_tagset_enable) + shost->host_tagset = 1; + ioc->is_driver_loading = 1; if ((mpt3sas_base_attach(ioc))) { ioc_err(ioc, "failure at %s:%d/%s()!\n", @@ -12226,16 +12309,17 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) } else ioc->hide_drives = 0; - shost->host_tagset = 0; shost->nr_hw_queues = 1; - if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 && - host_tagset_enable && ioc->smp_affinity_enable) { - - shost->host_tagset = 1; + if (shost->host_tagset) { shost->nr_hw_queues = ioc->reply_queue_count - ioc->high_iops_queues; + iopoll_q_count = + ioc->reply_queue_count - ioc->iopoll_q_start_index; + + shost->nr_maps = iopoll_q_count ? 3 : 1; + dev_info(&ioc->pdev->dev, "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n", shost->can_queue, shost->nr_hw_queues); @@ -12359,6 +12443,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) /* Permanent error, prepare for device removal */ ioc->pci_error_recovery = 1; mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_pause_mq_polling(ioc); _scsih_flush_running_cmds(ioc); return PCI_ERS_RESULT_DISCONNECT; } diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 6bb03d7a254d..4d251bf630a3 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -702,7 +702,7 @@ static int mvumi_host_reset(struct scsi_cmnd *scmd) mhba = (struct mvumi_hba *) scmd->device->host->hostdata; scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n", - scmd->request->tag, scmd->cmnd[0], scmd->retries); + scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries); return mhba->instancet->reset_host(mhba); } diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index 542ed88ef90d..a4a88323e020 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -1263,6 +1263,7 @@ static int myrb_host_reset(struct scsi_cmnd *scmd) static int myrb_pthru_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { + struct request *rq = scsi_cmd_to_rq(scmd); struct myrb_hba *cb = shost_priv(shost); struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd); union myrb_cmd_mbox *mbox = &cmd_blk->mbox; @@ -1286,7 +1287,7 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost, } mbox->type3.opcode = MYRB_CMD_DCDB; - mbox->type3.id = scmd->request->tag + 3; + mbox->type3.id = rq->tag + 3; mbox->type3.addr = dcdb_addr; dcdb->channel = sdev->channel; dcdb->target = sdev->id; @@ -1305,11 +1306,11 @@ static int myrb_pthru_queuecommand(struct Scsi_Host *shost, break; } dcdb->early_status = false; - if (scmd->request->timeout <= 10) + if (rq->timeout <= 10) dcdb->timeout = MYRB_DCDB_TMO_10_SECS; - else if (scmd->request->timeout <= 60) + else if (rq->timeout <= 60) dcdb->timeout = MYRB_DCDB_TMO_60_SECS; - else if (scmd->request->timeout <= 600) + else if (rq->timeout <= 600) dcdb->timeout = MYRB_DCDB_TMO_10_MINS; else dcdb->timeout = MYRB_DCDB_TMO_24_HRS; @@ -1550,7 +1551,7 @@ static int myrb_ldev_queuecommand(struct Scsi_Host *shost, } 
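The mpt3sas scsih_map_queues() rework shown earlier in this diff splits the hardware queues into an MSI-X backed default map (offset past the high-IOPS vectors, mapped with PCI IRQ affinity) and a poll map at the tail that gets a plain CPU spread. A standalone sketch of the resulting layout, using hypothetical counts only:

#include <stdio.h>

int main(void)
{
	int reply_queues = 18, poll_queues = 2, high_iops = 0;
	int msix_vectors = reply_queues - poll_queues;	/* iopoll_q_start_index */
	int qoff = 0;

	/* HCTX_TYPE_DEFAULT: IRQ-driven queues, offset past the high-IOPS vectors */
	int def_queues = msix_vectors - high_iops;
	printf("default map: nr_queues=%d queue_offset=%d (pci affinity)\n",
	       def_queues, qoff);
	qoff += def_queues;

	/* HCTX_TYPE_READ stays empty; HCTX_TYPE_POLL has no IRQ, so plain cpu map */
	printf("poll map:    nr_queues=%d queue_offset=%d (cpu spread)\n",
	       poll_queues, qoff);
	return 0;
}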
myrb_reset_cmd(cmd_blk); - mbox->type5.id = scmd->request->tag + 3; + mbox->type5.id = scsi_cmd_to_rq(scmd)->tag + 3; if (scmd->sc_data_direction == DMA_NONE) goto submit; nsge = scsi_dma_map(scmd); diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index 26326af23dbc..07f274afd7e5 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -1582,6 +1582,7 @@ static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd, static int myrs_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { + struct request *rq = scsi_cmd_to_rq(scmd); struct myrs_hba *cs = shost_priv(shost); struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd); union myrs_cmd_mbox *mbox = &cmd_blk->mbox; @@ -1628,7 +1629,7 @@ static int myrs_queuecommand(struct Scsi_Host *shost, return SCSI_MLQUEUE_HOST_BUSY; cmd_blk->sense_addr = sense_addr; - timeout = scmd->request->timeout; + timeout = rq->timeout; if (scmd->cmd_len <= 10) { if (scmd->device->channel >= cs->ctlr_info->physchan_present) { struct myrs_ldev_info *ldev_info = sdev->hostdata; @@ -1644,10 +1645,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost, mbox->SCSI_10.pdev.target = sdev->id; mbox->SCSI_10.pdev.channel = sdev->channel; } - mbox->SCSI_10.id = scmd->request->tag + 3; + mbox->SCSI_10.id = rq->tag + 3; mbox->SCSI_10.control.dma_ctrl_to_host = (scmd->sc_data_direction == DMA_FROM_DEVICE); - if (scmd->request->cmd_flags & REQ_FUA) + if (rq->cmd_flags & REQ_FUA) mbox->SCSI_10.control.fua = true; mbox->SCSI_10.dma_size = scsi_bufflen(scmd); mbox->SCSI_10.sense_addr = cmd_blk->sense_addr; @@ -1690,10 +1691,10 @@ static int myrs_queuecommand(struct Scsi_Host *shost, mbox->SCSI_255.pdev.target = sdev->id; mbox->SCSI_255.pdev.channel = sdev->channel; } - mbox->SCSI_255.id = scmd->request->tag + 3; + mbox->SCSI_255.id = rq->tag + 3; mbox->SCSI_255.control.dma_ctrl_to_host = (scmd->sc_data_direction == DMA_FROM_DEVICE); - if (scmd->request->cmd_flags & REQ_FUA) + if (rq->cmd_flags & REQ_FUA) mbox->SCSI_255.control.fua = true; mbox->SCSI_255.dma_size = scsi_bufflen(scmd); mbox->SCSI_255.sense_addr = cmd_blk->sense_addr; diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index c76e9f05d042..09958f78b70f 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -4164,8 +4164,8 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd) ** **---------------------------------------------------- */ - if (np->settle_time && cmd->request->timeout >= HZ) { - u_long tlimit = jiffies + cmd->request->timeout - HZ; + if (np->settle_time && scsi_cmd_to_rq(cmd)->timeout >= HZ) { + u_long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout - HZ; if (time_after(np->settle_time, tlimit)) np->settle_time = tlimit; } diff --git a/drivers/scsi/pcmcia/fdomain_cs.c b/drivers/scsi/pcmcia/fdomain_cs.c index e42acf314d06..33df6a9ba9b5 100644 --- a/drivers/scsi/pcmcia/fdomain_cs.c +++ b/drivers/scsi/pcmcia/fdomain_cs.c @@ -45,8 +45,10 @@ static int fdomain_probe(struct pcmcia_device *link) goto fail_disable; if (!request_region(link->resource[0]->start, FDOMAIN_REGION_SIZE, - "fdomain_cs")) + "fdomain_cs")) { + ret = -EBUSY; goto fail_disable; + } sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev); if (!sh) { diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 17c0f26e683a..63690508313b 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1323,7 +1323,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, void *pMessage; 
unsigned long flags; int q_index = circularQ - pm8001_ha->inbnd_q_tbl; - int rv = -1; + int rv; WARN_ON(q_index >= PM8001_MAX_INB_NUM); spin_lock_irqsave(&circularQ->iq_lock, flags); diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index 6b5b6a75ac88..3404782988d5 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -1162,13 +1162,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, return; } - if (!sc_cmd->request) { - QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, " - "sc_cmd=%p.\n", sc_cmd); - return; - } - - if (!sc_cmd->request->q) { + if (!scsi_cmd_to_rq(sc_cmd)->q) { QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request " "is not valid, sc_cmd=%p.\n", sc_cmd); return; diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 71333d3c5c86..ac99e980bb31 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -609,14 +609,7 @@ static void qedi_scsi_completion(struct qedi_ctx *qedi, goto error; } - if (!sc_cmd->request) { - QEDI_WARN(&qedi->dbg_ctx, - "sc_cmd->request is NULL, sc_cmd=%p.\n", - sc_cmd); - goto error; - } - - if (!sc_cmd->request->q) { + if (!scsi_cmd_to_rq(sc_cmd)->q) { QEDI_WARN(&qedi->dbg_ctx, "request->q is NULL so request is not valid, sc_cmd=%p.\n", sc_cmd); diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 928da90b79be..9f9b4900c3ab 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -490,7 +490,6 @@ __setup("qla1280=", qla1280_setup); #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE #define CMD_RESULT(Cmnd) Cmnd->result #define CMD_HANDLE(Cmnd) Cmnd->host_scribble -#define CMD_REQUEST(Cmnd) Cmnd->request->cmd #define CMD_HOST(Cmnd) Cmnd->device->host #define SCSI_BUS_32(Cmnd) Cmnd->device->channel @@ -2827,7 +2826,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); /* Set ISP command timeout. */ - pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ); + pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ); /* Set device target ID and LUN */ pkt->lun = SCSI_LUN_32(cmd); @@ -3082,7 +3081,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8)); /* Set ISP command timeout. 
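These timeout and tag conversions all follow one pattern: look up the backing request with the scsi_cmd_to_rq() helper instead of dereferencing scmd->request directly. A minimal sketch of the idiom (the helper name below is made up for illustration):

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>

static u32 example_tag_and_secs(struct scsi_cmnd *scmd, unsigned int *secs)
{
	struct request *rq = scsi_cmd_to_rq(scmd);

	*secs = rq->timeout / HZ;	/* block layer stores the timeout in jiffies */
	return blk_mq_unique_tag(rq);	/* unique tag also encodes the hw queue */
}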
*/ - pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ); + pkt->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ); /* Set device target ID and LUN */ pkt->lun = SCSI_LUN_32(cmd); diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile index 17d5bc1cc56b..cbc1303e761e 100644 --- a/drivers/scsi/qla2xxx/Makefile +++ b/drivers/scsi/qla2xxx/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ - qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o + qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \ + qla_edif.o obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 3aa9869f6fae..22191e9a04a0 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2435,6 +2435,7 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show, qla2x00_port_speed_store); static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL); static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); +static DEVICE_ATTR_RO(edif_doorbell); struct device_attribute *qla2x00_host_attrs[] = { @@ -2480,6 +2481,7 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_port_no, &dev_attr_fw_attr, &dev_attr_dport_diagnostics, + &dev_attr_edif_doorbell, NULL, /* reserve for qlini_mode */ NULL, /* reserve for ql2xiniexchg */ NULL, /* reserve for ql2xexchoffld */ @@ -3107,6 +3109,9 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) qla2x00_wait_for_sess_deletion(vha); qla_nvme_delete(vha); + qla_enode_stop(vha); + qla_edb_stop(vha); + vha->flags.delete_progress = 1; qlt_remove_target(ha, vha); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index d42b2ad84049..4b5d28d89d69 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -25,6 +25,10 @@ void qla2x00_bsg_job_done(srb_t *sp, int res) struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; + ql_dbg(ql_dbg_user, sp->vha, 0x7009, + "%s: sp hdl %x, result=%x bsg ptr %p\n", + __func__, sp->handle, res, bsg_job); + sp->free(sp); bsg_reply->result = res; @@ -53,11 +57,19 @@ void qla2x00_bsg_sp_free(srb_t *sp) bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); } else { - dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, - bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); - dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, - bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + if (sp->remap.remapped) { + dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf, + sp->remap.rsp.dma); + dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf, + sp->remap.req.dma); + } else { + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); + + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + } } if (sp->type == SRB_CT_CMD || @@ -266,6 +278,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) int req_sg_cnt, rsp_sg_cnt; int rval = (DID_ERROR << 16); uint16_t nextlid = 0; + uint32_t els_cmd = 0; if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); @@ -279,6 +292,9 @@ qla2x00_process_els(struct bsg_job *bsg_job) vha = shost_priv(host); ha = vha->hw; type = "FC_BSG_HST_ELS_NOLOGIN"; + 
els_cmd = bsg_request->rqst_data.h_els.command_code; + if (els_cmd == ELS_AUTH_ELS) + return qla_edif_process_els(vha, bsg_job); } if (!vha->flags.online) { @@ -2768,10 +2784,13 @@ qla2x00_manage_host_port(struct bsg_job *bsg_job) } static int -qla2x00_process_vendor_specific(struct bsg_job *bsg_job) +qla2x00_process_vendor_specific(struct scsi_qla_host *vha, struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; + ql_dbg(ql_dbg_edif, vha, 0x911b, "%s FC_BSG_HST_VENDOR cmd[0]=0x%x\n", + __func__, bsg_request->rqst_data.h_vendor.vendor_cmd[0]); + switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) { case QL_VND_LOOPBACK: return qla2x00_process_loopback(bsg_job); @@ -2840,6 +2859,9 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job) case QL_VND_DPORT_DIAGNOSTICS: return qla2x00_do_dport_diagnostics(bsg_job); + case QL_VND_EDIF_MGMT: + return qla_edif_app_mgmt(bsg_job); + case QL_VND_SS_GET_FLASH_IMAGE_STATUS: return qla2x00_get_flash_image_status(bsg_job); @@ -2897,12 +2919,19 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) ql_dbg(ql_dbg_user, vha, 0x709f, "BSG: ISP abort active/needed -- cmd=%d.\n", bsg_request->msgcode); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); return -EBUSY; } + if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + return -EIO; + } + skip_chip_chk: - ql_dbg(ql_dbg_user, vha, 0x7000, - "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode); + ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, + "Entered %s msgcode=0x%x. bsg ptr %px\n", + __func__, bsg_request->msgcode, bsg_job); switch (bsg_request->msgcode) { case FC_BSG_RPT_ELS: @@ -2913,7 +2942,7 @@ skip_chip_chk: ret = qla2x00_process_ct(bsg_job); break; case FC_BSG_HST_VENDOR: - ret = qla2x00_process_vendor_specific(bsg_job); + ret = qla2x00_process_vendor_specific(vha, bsg_job); break; case FC_BSG_HST_ADD_RPORT: case FC_BSG_HST_DEL_RPORT: @@ -2922,6 +2951,10 @@ skip_chip_chk: ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); break; } + + ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, + "%s done with return %x\n", __func__, ret); + return ret; } @@ -2936,6 +2969,8 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) unsigned long flags; struct req_que *req; + ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. 
bsg ptr %p.\n", + __func__, bsg_job); /* find the bsg job from the active list of commands */ spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { @@ -2945,27 +2980,26 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; - if (sp) { - if (((sp->type == SRB_CT_CMD) || - (sp->type == SRB_ELS_CMD_HST) || - (sp->type == SRB_FXIOCB_BCMD)) - && (sp->u.bsg_job == bsg_job)) { - req->outstanding_cmds[cnt] = NULL; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(sp)) { - ql_log(ql_log_warn, vha, 0x7089, - "mbx abort_command " - "failed.\n"); - bsg_reply->result = -EIO; - } else { - ql_dbg(ql_dbg_user, vha, 0x708a, - "mbx abort_command " - "success.\n"); - bsg_reply->result = 0; - } - spin_lock_irqsave(&ha->hardware_lock, flags); - goto done; + if (sp && + (sp->type == SRB_CT_CMD || + sp->type == SRB_ELS_CMD_HST || + sp->type == SRB_ELS_CMD_HST_NOLOGIN || + sp->type == SRB_FXIOCB_BCMD) && + sp->u.bsg_job == bsg_job) { + req->outstanding_cmds[cnt] = NULL; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (ha->isp_ops->abort_command(sp)) { + ql_log(ql_log_warn, vha, 0x7089, + "mbx abort_command failed.\n"); + bsg_reply->result = -EIO; + } else { + ql_dbg(ql_dbg_user, vha, 0x708a, + "mbx abort_command success.\n"); + bsg_reply->result = 0; } + spin_lock_irqsave(&ha->hardware_lock, flags); + goto done; + } } } diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index 0274e99e4a12..dd793cf8bc1e 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -31,6 +31,7 @@ #define QL_VND_DPORT_DIAGNOSTICS 0x19 #define QL_VND_GET_PRIV_STATS_EX 0x1A #define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E +#define QL_VND_EDIF_MGMT 0X1F #define QL_VND_MANAGE_HOST_STATS 0x23 #define QL_VND_GET_HOST_STATS 0x24 #define QL_VND_GET_TGT_STATS 0x25 @@ -294,4 +295,6 @@ struct qla_active_regions { uint8_t reserved[32]; } __packed; +#include "qla_edif_bsg.h" + #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 9eb708e5e22e..f1f6c740bdcd 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -367,6 +367,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ #define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */ +#define ql_dbg_edif 0x00000400 /* edif and purex debug */ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, uint32_t, void **); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2f67ec1df3e6..af0e8be0eb9b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -49,6 +49,28 @@ typedef struct { uint8_t domain; } le_id_t; +/* + * 24 bit port ID type definition. + */ +typedef union { + uint32_t b24 : 24; + struct { +#ifdef __BIG_ENDIAN + uint8_t domain; + uint8_t area; + uint8_t al_pa; +#elif defined(__LITTLE_ENDIAN) + uint8_t al_pa; + uint8_t area; + uint8_t domain; +#else +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" 
+#endif + uint8_t rsvd_1; + } b; +} port_id_t; +#define INVALID_PORT_ID 0xFFFFFF + #include "qla_bsg.h" #include "qla_dsd.h" #include "qla_nx.h" @@ -319,6 +341,13 @@ struct name_list_extended { u32 size; u8 sent; }; + +struct els_reject { + struct fc_els_ls_rjt *c; + dma_addr_t cdma; + u16 size; +}; + /* * Timeout timer counts in seconds */ @@ -345,6 +374,8 @@ struct name_list_extended { #define FW_MAX_EXCHANGES_CNT (32 * 1024) #define REDUCE_EXCHANGES_CNT (8 * 1024) +#define SET_DID_STATUS(stat_var, status) (stat_var = status << 16) + struct req_que; struct qla_tgt_sess; @@ -370,32 +401,10 @@ struct srb_cmd { #define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */ #define SRB_WAKEUP_ON_COMP BIT_6 #define SRB_DIF_BUNDL_DMA_VALID BIT_7 /* DIF: DMA list valid */ +#define SRB_EDIF_CLEANUP_DELETE BIT_9 /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) - -/* - * 24 bit port ID type definition. - */ -typedef union { - uint32_t b24 : 24; - - struct { -#ifdef __BIG_ENDIAN - uint8_t domain; - uint8_t area; - uint8_t al_pa; -#elif defined(__LITTLE_ENDIAN) - uint8_t al_pa; - uint8_t area; - uint8_t domain; -#else -#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" -#endif - uint8_t rsvd_1; - } b; -} port_id_t; -#define INVALID_PORT_ID 0xFFFFFF #define ISP_REG16_DISCONNECT 0xFFFF static inline le_id_t be_id_to_le(be_id_t id) @@ -483,6 +492,7 @@ struct srb_iocb { #define SRB_LOGIN_SKIP_PRLI BIT_2 #define SRB_LOGIN_NVME_PRLI BIT_3 #define SRB_LOGIN_PRLI_ONLY BIT_4 +#define SRB_LOGIN_FCSP BIT_5 uint16_t data[2]; u32 iop[2]; } logio; @@ -587,6 +597,10 @@ struct srb_iocb { u16 cmd; u16 vp_index; } ctrlvp; + struct { + struct edif_sa_ctl *sa_ctl; + struct qla_sa_update_frame sa_frame; + } sa_update; } u; struct timer_list timer; @@ -617,6 +631,21 @@ struct srb_iocb { #define SRB_PRLI_CMD 21 #define SRB_CTRL_VP 22 #define SRB_PRLO_CMD 23 +#define SRB_SA_UPDATE 25 +#define SRB_ELS_CMD_HST_NOLOGIN 26 +#define SRB_SA_REPLACE 27 + +struct qla_els_pt_arg { + u8 els_opcode; + u8 vp_idx; + __le16 nport_handle; + u16 control_flags; + __le32 rx_xchg_address; + port_id_t did; + u32 tx_len, tx_byte_count, rx_len, rx_byte_count; + dma_addr_t tx_addr, rx_addr; + +}; enum { TYPE_SRB, @@ -630,6 +659,13 @@ struct iocb_resource { u16 iocb_cnt; }; +struct bsg_cmd { + struct bsg_job *bsg_job; + union { + struct qla_els_pt_arg els_arg; + } u; +}; + typedef struct srb { /* * Do not move cmd_type field, it needs to @@ -662,7 +698,21 @@ typedef struct srb { struct srb_iocb iocb_cmd; struct bsg_job *bsg_job; struct srb_cmd scmd; + struct bsg_cmd bsg_cmd; } u; + struct { + bool remapped; + struct { + dma_addr_t dma; + void *buf; + uint len; + } req; + struct { + dma_addr_t dma; + void *buf; + uint len; + } rsp; + } remap; /* * Report completion status @res and call sp_put(@sp). @res is * an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a @@ -2294,6 +2344,7 @@ struct imm_ntfy_from_isp { __le16 nport_handle; uint16_t reserved_2; __le16 flags; +#define NOTIFY24XX_FLAGS_FCSP BIT_5 #define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 #define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 __le16 srr_rx_id; @@ -2424,6 +2475,7 @@ enum discovery_state { DSC_LOGIN_COMPLETE, DSC_ADISC, DSC_DELETE_PEND, + DSC_LOGIN_AUTH_PEND, }; enum login_state { /* FW control Target side */ @@ -2563,6 +2615,34 @@ typedef struct fc_port { u64 tgt_short_link_down_cnt; u64 tgt_link_down_time; u64 dev_loss_tmo; + /* + * EDIF parameters for encryption. 
+ */ + struct { + uint32_t enable:1; /* device is edif enabled/req'd */ + uint32_t app_stop:2; + uint32_t app_started:1; + uint32_t secured_login:1; + uint32_t aes_gmac:1; + uint32_t app_sess_online:1; + uint32_t tx_sa_set:1; + uint32_t rx_sa_set:1; + uint32_t tx_sa_pending:1; + uint32_t rx_sa_pending:1; + uint32_t tx_rekey_cnt; + uint32_t rx_rekey_cnt; + uint64_t tx_bytes; + uint64_t rx_bytes; + uint8_t non_secured_login; + uint8_t auth_state; + uint16_t rekey_cnt; + struct list_head edif_indx_list; + spinlock_t indx_list_lock; + + struct list_head tx_sa_list; + struct list_head rx_sa_list; + spinlock_t sa_list_lock; + } edif; } fc_port_t; enum { @@ -2604,7 +2684,8 @@ static const char * const port_dstate_str[] = { "UPD_FCPORT", "LOGIN_COMPLETE", "ADISC", - "DELETE_PEND" + "DELETE_PEND", + "LOGIN_AUTH_PEND", }; /* @@ -2616,6 +2697,8 @@ static const char * const port_dstate_str[] = { #define FCF_ASYNC_SENT BIT_3 #define FCF_CONF_COMP_SUPPORTED BIT_4 #define FCF_ASYNC_ACTIVE BIT_5 +#define FCF_FCSP_DEVICE BIT_6 +#define FCF_EDIF_DELETE BIT_7 /* No loop ID flag. */ #define FC_NO_LOOP_ID 0x1000 @@ -3386,6 +3469,7 @@ enum qla_work_type { QLA_EVT_SP_RETRY, QLA_EVT_IIDMA, QLA_EVT_ELS_PLOGI, + QLA_EVT_SA_REPLACE, }; @@ -3444,6 +3528,11 @@ struct qla_work_evt { u8 fc4_type; srb_t *sp; } gpnft; + struct { + struct edif_sa_ctl *sa_ctl; + fc_port_t *fcport; + uint16_t nport_handle; + } sa_update; } u; }; @@ -3845,7 +3934,6 @@ struct qlt_hw_data { int num_act_qpairs; #define DEFAULT_NAQP 2 spinlock_t atio_lock ____cacheline_aligned; - struct btree_head32 host_map; }; #define MAX_QFULL_CMDS_ALLOC 8192 @@ -3935,6 +4023,7 @@ struct qla_hw_data { uint32_t scm_supported_f:1; /* Enabled in Driver */ uint32_t scm_enabled:1; + uint32_t edif_enabled:1; uint32_t max_req_queue_warned:1; uint32_t plogi_template_valid:1; uint32_t port_isolated:1; @@ -4619,8 +4708,23 @@ struct qla_hw_data { struct qla_hw_data_stat stat; pci_error_state_t pci_error_state; u64 prev_cmd_cnt; + struct dma_pool *purex_dma_pool; + struct btree_head32 host_map; + +#define EDIF_NUM_SA_INDEX 512 +#define EDIF_TX_SA_INDEX_BASE EDIF_NUM_SA_INDEX + void *edif_rx_sa_id_map; + void *edif_tx_sa_id_map; + spinlock_t sadb_fp_lock; + + struct list_head sadb_tx_index_list; + struct list_head sadb_rx_index_list; + spinlock_t sadb_lock; /* protects list */ + struct els_reject elsrej; }; +#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES)) + struct active_regions { uint8_t global; struct { @@ -4659,6 +4763,8 @@ struct purex_item { } iocb; }; +#include "qla_edif.h" + #define SCM_FLAG_RDF_REJECT 0x00 #define SCM_FLAG_RDF_COMPLETED 0x01 @@ -4888,6 +4994,8 @@ typedef struct scsi_qla_host { u64 reset_cmd_err_cnt; u64 link_down_time; u64 short_link_down_cnt; + struct edif_dbell e_dbell; + struct pur_core pur_cinfo; } scsi_qla_host_t; struct qla27xx_image_status { @@ -5088,6 +5196,43 @@ enum nexus_wait_type { WAIT_LUN, }; +#define INVALID_EDIF_SA_INDEX 0xffff +#define RX_DELETE_NO_EDIF_SA_INDEX 0xfffe + +#define QLA_SKIP_HANDLE QLA_TGT_SKIP_HANDLE + +/* edif hash element */ +struct edif_list_entry { + uint16_t handle; /* nport_handle */ + uint32_t update_sa_index; + uint32_t delete_sa_index; + uint32_t count; /* counter for filtering sa_index */ +#define EDIF_ENTRY_FLAGS_CLEANUP 0x01 /* this index is being cleaned up */ + uint32_t flags; /* used by sadb cleanup code */ + fc_port_t *fcport; /* needed by rx delay timer function */ + struct timer_list timer; /* rx delay timer */ + struct list_head next; +}; + +#define 
EDIF_TX_INDX_BASE 512 +#define EDIF_RX_INDX_BASE 0 +#define EDIF_RX_DELETE_FILTER_COUNT 3 /* delay queuing rx delete until this many */ + +/* entry in the sa_index free pool */ + +struct sa_index_pair { + uint16_t sa_index; + uint32_t spi; +}; + +/* edif sa_index data structure */ +struct edif_sa_index_entry { + struct sa_index_pair sa_pair[2]; + fc_port_t *fcport; + uint16_t handle; + struct list_head next; +}; + /* Refer to SNIA SFF 8247 */ struct sff_8247_a0 { u8 txid; /* transceiver id */ diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c new file mode 100644 index 000000000000..2db954a7aaf1 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -0,0 +1,3409 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell Fibre Channel HBA Driver + * Copyright (c) 2021 Marvell + */ +#include "qla_def.h" +#include "qla_edif.h" + +#include <linux/kthread.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <scsi/scsi_tcq.h> + +static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle, + struct list_head *sa_list); +static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport, + struct qla_sa_update_frame *sa_frame); +static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, + uint16_t sa_index); +static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, struct bsg_job *); + +struct edb_node { + struct list_head list; + uint32_t ntype; + union { + port_id_t plogi_did; + uint32_t async; + port_id_t els_sid; + struct edif_sa_update_aen sa_aen; + } u; +}; + +static struct els_sub_cmd { + uint16_t cmd; + const char *str; +} sc_str[] = { + {SEND_ELS, "send ELS"}, + {SEND_ELS_REPLY, "send ELS Reply"}, + {PULL_ELS, "retrieve ELS"}, +}; + +const char *sc_to_str(uint16_t cmd) +{ + int i; + struct els_sub_cmd *e; + + for (i = 0; i < ARRAY_SIZE(sc_str); i++) { + e = sc_str + i; + if (cmd == e->cmd) + return e->str; + } + return "unknown"; +} + +static struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport, + uint16_t handle) +{ + struct edif_list_entry *entry; + struct edif_list_entry *tentry; + struct list_head *indx_list = &fcport->edif.edif_indx_list; + + list_for_each_entry_safe(entry, tentry, indx_list, next) { + if (entry->handle == handle) + return entry; + } + return NULL; +} + +/* timeout called when no traffic and delayed rx sa_index delete */ +static void qla2x00_sa_replace_iocb_timeout(struct timer_list *t) +{ + struct edif_list_entry *edif_entry = from_timer(edif_entry, t, timer); + fc_port_t *fcport = edif_entry->fcport; + struct scsi_qla_host *vha = fcport->vha; + struct edif_sa_ctl *sa_ctl; + uint16_t nport_handle; + unsigned long flags = 0; + + ql_dbg(ql_dbg_edif, vha, 0x3069, + "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n", + __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24); + + /* + * if delete_sa_index is valid then no one has serviced this + * delayed delete + */ + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + + /* + * delete_sa_index is invalidated when we find the new sa_index in + * the incoming data stream. If it is not invalidated then we are + * still looking for the new sa_index because there is no I/O and we + * need to just force the rx delete and move on. Otherwise + * we could get another rekey which will result in an error 66. 
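The delayed rx sa_index delete relies on a timer embedded in the list entry and recovered in the callback with from_timer(). A reduced sketch of that pattern, with illustrative names and the index-list locking and work posting omitted:

#include <linux/slab.h>
#include <linux/timer.h>

struct demo_entry {
	u16 handle;			/* nport_handle being tracked */
	struct timer_list timer;	/* rx delete delay timer */
};

static void demo_timeout(struct timer_list *t)
{
	struct demo_entry *e = from_timer(e, t, timer);

	pr_info("delayed rx delete fired for nport_handle 0x%x\n", e->handle);
}

static struct demo_entry *demo_entry_alloc(u16 handle)
{
	struct demo_entry *e = kzalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return NULL;
	e->handle = handle;
	timer_setup(&e->timer, demo_timeout, 0);
	mod_timer(&e->timer, jiffies + msecs_to_jiffies(200));
	return e;
}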
+ */ + if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { + uint16_t delete_sa_index = edif_entry->delete_sa_index; + + edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX; + nport_handle = edif_entry->handle; + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, + delete_sa_index, 0); + + if (sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n", + __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index, + nport_handle); + + sa_ctl->flags = EDIF_SA_CTL_FLG_DEL; + set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state); + qla_post_sa_replace_work(fcport->vha, fcport, + nport_handle, sa_ctl); + + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_ctl not found for delete_sa_index: %d\n", + __func__, edif_entry->delete_sa_index); + } + } else { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + } +} + +/* + * create a new list entry for this nport handle and + * add an sa_update index to the list - called for sa_update + */ +static int qla_edif_list_add_sa_update_index(fc_port_t *fcport, + uint16_t sa_index, uint16_t handle) +{ + struct edif_list_entry *entry; + unsigned long flags = 0; + + /* if the entry exists, then just update the sa_index */ + entry = qla_edif_list_find_sa_index(fcport, handle); + if (entry) { + entry->update_sa_index = sa_index; + entry->count = 0; + return 0; + } + + /* + * This is the normal path - there should be no existing entry + * when update is called. The exception is at startup + * when update is called for the first two sa_indexes + * followed by a delete of the first sa_index + */ + entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + + INIT_LIST_HEAD(&entry->next); + entry->handle = handle; + entry->update_sa_index = sa_index; + entry->delete_sa_index = INVALID_EDIF_SA_INDEX; + entry->count = 0; + entry->flags = 0; + timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, 0); + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + list_add_tail(&entry->next, &fcport->edif.edif_indx_list); + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + return 0; +} + +/* remove an entry from the list */ +static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + list_del(&entry->next); + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); +} + +int qla_post_sa_replace_work(struct scsi_qla_host *vha, + fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.sa_update.fcport = fcport; + e->u.sa_update.sa_ctl = sa_ctl; + e->u.sa_update.nport_handle = nport_handle; + fcport->flags |= FCF_ASYNC_ACTIVE; + return qla2x00_post_work(vha, e); +} + +static void +qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport) +{ + ql_dbg(ql_dbg_edif, vha, 0x2058, + "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n", + fcport->node_name, fcport->port_name, fcport->d_id.b24); + + fcport->edif.tx_rekey_cnt = 0; + fcport->edif.rx_rekey_cnt = 0; + + fcport->edif.tx_bytes = 0; + fcport->edif.rx_bytes = 0; +} + +static int qla_bsg_check(scsi_qla_host_t *vha, struct bsg_job *bsg_job, +fc_port_t *fcport) +{ + struct extra_auth_els *p; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct 
qla_bsg_auth_els_request *req = + (struct qla_bsg_auth_els_request *)bsg_job->request; + + if (!vha->hw->flags.edif_enabled) { + ql_dbg(ql_dbg_edif, vha, 0x9105, + "%s edif not enabled\n", __func__); + goto done; + } + if (vha->e_dbell.db_flags != EDB_ACTIVE) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled\n", __func__); + goto done; + } + + p = &req->e; + + /* Get response */ + if (p->sub_cmd == PULL_ELS) { + struct qla_bsg_auth_els_reply *rpl = + (struct qla_bsg_auth_els_reply *)bsg_job->reply; + + qla_pur_get_pending(vha, fcport, bsg_job); + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %p\n", + __func__, sc_to_str(p->sub_cmd), fcport->port_name, + fcport->d_id.b24, rpl->rx_xchg_address, + rpl->r.reply_payload_rcv_len, bsg_job); + + goto done; + } + return 0; + +done: + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + return -EIO; +} + +fc_port_t * +qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id) +{ + fc_port_t *f, *tf; + + f = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if ((f->flags & FCF_FCSP_DEVICE)) { + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x2058, + "Found secure fcport - nn %8phN pn %8phN portid=0x%x, 0x%x.\n", + f->node_name, f->port_name, + f->d_id.b24, id->b24); + if (f->d_id.b24 == id->b24) + return f; + } + } + return NULL; +} + +/** + * qla_edif_app_check(): check for valid application id. + * @vha: host adapter pointer + * @appid: application id + * Return: false = fail, true = pass + */ +static bool +qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid) +{ + /* check that the app is allow/known to the driver */ + + if (appid.app_vid == EDIF_APP_ID) { + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, "%s app id ok\n", __func__); + return true; + } + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app id not ok (%x)", + __func__, appid.app_vid); + + return false; +} + +static void qla_edif_reset_auth_wait(struct fc_port *fcport, int state, + int waitonly) +{ + int cnt, max_cnt = 200; + bool traced = false; + + fcport->keep_nport_handle = 1; + + if (!waitonly) { + qla2x00_set_fcport_disc_state(fcport, state); + qlt_schedule_sess_for_deletion(fcport); + } else { + qla2x00_set_fcport_disc_state(fcport, state); + } + + ql_dbg(ql_dbg_edif, fcport->vha, 0xf086, + "%s: waiting for session, max_cnt=%u\n", + __func__, max_cnt); + + cnt = 0; + + if (waitonly) { + /* Marker wait min 10 msecs. 
*/ + msleep(50); + cnt += 50; + } + while (1) { + if (!traced) { + ql_dbg(ql_dbg_edif, fcport->vha, 0xf086, + "%s: session sleep.\n", + __func__); + traced = true; + } + msleep(20); + cnt++; + if (waitonly && (fcport->disc_state == state || + fcport->disc_state == DSC_LOGIN_COMPLETE)) + break; + if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) + break; + if (cnt > max_cnt) + break; + } + + if (!waitonly) { + ql_dbg(ql_dbg_edif, fcport->vha, 0xf086, + "%s: waited for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n", + __func__, fcport->port_name, fcport->loop_id, + fcport->d_id.b24, fcport, fcport->disc_state, cnt); + } else { + ql_dbg(ql_dbg_edif, fcport->vha, 0xf086, + "%s: waited ONLY for session - %8phC, loopid=%x portid=%06x fcport=%p state=%u, cnt=%u\n", + __func__, fcport->port_name, fcport->loop_id, + fcport->d_id.b24, fcport, fcport->disc_state, cnt); + } +} + +static void +qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl, + int index) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + list_del(&sa_ctl->next); + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); + if (index >= 512) + fcport->edif.tx_rekey_cnt--; + else + fcport->edif.rx_rekey_cnt--; + kfree(sa_ctl); +} + +/* return an index to the freepool */ +static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir, + uint16_t sa_index) +{ + void *sa_id_map; + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + u16 lsa_index = sa_index; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "%s: entry\n", __func__); + + if (dir) { + sa_id_map = ha->edif_tx_sa_id_map; + lsa_index -= EDIF_TX_SA_INDEX_BASE; + } else { + sa_id_map = ha->edif_rx_sa_id_map; + } + + spin_lock_irqsave(&ha->sadb_fp_lock, flags); + clear_bit(lsa_index, sa_id_map); + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: index %d added to free pool\n", __func__, sa_index); +} + +static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha, + struct fc_port *fcport, struct edif_sa_index_entry *entry, + int pdir) +{ + struct edif_list_entry *edif_entry; + struct edif_sa_ctl *sa_ctl; + int i, dir; + int key_cnt = 0; + + for (i = 0; i < 2; i++) { + if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX) + continue; + + if (fcport->loop_id != entry->handle) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: ** WARNING %d** entry handle: 0x%x, lid: 0x%x, sa_index: %d\n", + __func__, i, entry->handle, fcport->loop_id, + entry->sa_pair[i].sa_index); + } + + /* release the sa_ctl */ + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, + entry->sa_pair[i].sa_index, pdir); + if (sa_ctl && + qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index); + qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index); + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl); + } + + /* Release the index */ + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, entry->sa_pair[i].sa_index, entry->handle); + + dir = (entry->sa_pair[i].sa_index < + EDIF_TX_SA_INDEX_BASE) ? 
0 : 1; + qla_edif_add_sa_index_to_freepool(fcport, dir, + entry->sa_pair[i].sa_index); + + /* Delete timer on RX */ + if (pdir != SAU_FLG_TX) { + edif_entry = + qla_edif_list_find_sa_index(fcport, entry->handle); + if (edif_entry) { + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n", + __func__, edif_entry, edif_entry->update_sa_index, + edif_entry->delete_sa_index); + qla_edif_list_delete_sa_index(fcport, edif_entry); + /* + * valid delete_sa_index indicates there is a rx + * delayed delete queued + */ + if (edif_entry->delete_sa_index != + INVALID_EDIF_SA_INDEX) { + del_timer(&edif_entry->timer); + + /* build and send the aen */ + fcport->edif.rx_sa_set = 1; + fcport->edif.rx_sa_pending = 0; + qla_edb_eventcreate(vha, + VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_RX_SA_KEY, fcport); + } + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n", + __func__, edif_entry, edif_entry->update_sa_index, + edif_entry->delete_sa_index); + + kfree(edif_entry); + } + } + key_cnt++; + } + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: %d %s keys released\n", + __func__, key_cnt, pdir ? "tx" : "rx"); +} + +/* find and release all outstanding sadb sa_indices */ +void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport) +{ + struct edif_sa_index_entry *entry, *tmp; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "%s: Starting...\n", __func__); + + spin_lock_irqsave(&ha->sadb_lock, flags); + + list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) { + if (entry->fcport == fcport) { + list_del(&entry->next); + spin_unlock_irqrestore(&ha->sadb_lock, flags); + __qla2x00_release_all_sadb(vha, fcport, entry, 0); + kfree(entry); + spin_lock_irqsave(&ha->sadb_lock, flags); + break; + } + } + + list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) { + if (entry->fcport == fcport) { + list_del(&entry->next); + spin_unlock_irqrestore(&ha->sadb_lock, flags); + + __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX); + + kfree(entry); + spin_lock_irqsave(&ha->sadb_lock, flags); + break; + } + } + spin_unlock_irqrestore(&ha->sadb_lock, flags); +} + +/** + * qla_edif_app_start: application has announced its presence + * @vha: host adapter pointer + * @bsg_job: user request + * + * Set/activate doorbell. Reset current sessions and re-login with + * secure flag. 
+ */ +static int +qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct app_start appstart; + struct app_start_reply appreply; + struct fc_port *fcport, *tf; + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app start\n", __func__); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appstart, + sizeof(struct app_start)); + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app_vid=%x app_start_flags %x\n", + __func__, appstart.app_info.app_vid, appstart.app_start_flags); + + if (vha->e_dbell.db_flags != EDB_ACTIVE) { + /* mark doorbell as active since an app is now present */ + vha->e_dbell.db_flags = EDB_ACTIVE; + } else { + ql_dbg(ql_dbg_edif, vha, 0x911e, "%s doorbell already active\n", + __func__); + } + + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + ql_dbg(ql_dbg_edif, vha, 0xf084, + "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n", + __func__, fcport, fcport->port_name, + fcport->loop_id, fcport->d_id.b24, + fcport->logout_on_delete); + + ql_dbg(ql_dbg_edif, vha, 0xf084, + "keep %d els_logo %d disc state %d auth state %d stop state %d\n", + fcport->keep_nport_handle, + fcport->send_els_logo, fcport->disc_state, + fcport->edif.auth_state, fcport->edif.app_stop); + + if (atomic_read(&vha->loop_state) == LOOP_DOWN) + break; + + fcport->edif.app_started = 1; + fcport->edif.app_stop = 0; + + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC calling qla_edif_reset_auth_wait\n", + __func__, fcport->port_name); + fcport->edif.app_sess_online = 1; + qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0); + qla_edif_sa_ctl_init(vha, fcport); + } + + if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { + /* mark as active since an app is now present */ + vha->pur_cinfo.enode_flags = ENODE_ACTIVE; + } else { + ql_dbg(ql_dbg_edif, vha, 0x911f, "%s enode already active\n", + __func__); + } + + appreply.host_support_edif = vha->hw->flags.edif_enabled; + appreply.edif_enode_active = vha->pur_cinfo.enode_flags; + appreply.edif_edb_active = vha->e_dbell.db_flags; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply) + + sizeof(struct app_start_reply); + + SET_DID_STATUS(bsg_reply->result, DID_OK); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &appreply, + sizeof(struct app_start_reply)); + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s app start completed with 0x%x\n", + __func__, rval); + + return rval; +} + +/** + * qla_edif_app_stop - app has announced it's exiting. + * @vha: host adapter pointer + * @bsg_job: user space command pointer + * + * Free any in flight messages, clear all doorbell events + * to application. Reject any message relate to security. 
+ */ +static int +qla_edif_app_stop(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + struct app_stop appstop; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct fc_port *fcport, *tf; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appstop, + sizeof(struct app_stop)); + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s Stopping APP: app_vid=%x\n", + __func__, appstop.app_info.app_vid); + + /* Call db stop and enode stop functions */ + + /* if we leave this running short waits are operational < 16 secs */ + qla_enode_stop(vha); /* stop enode */ + qla_edb_stop(vha); /* stop db */ + + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + if (fcport->edif.non_secured_login) + continue; + + if (fcport->flags & FCF_FCSP_DEVICE) { + ql_dbg(ql_dbg_edif, vha, 0xf084, + "%s: sess %p from port %8phC lid %#04x s_id %06x logout %d keep %d els_logo %d\n", + __func__, fcport, + fcport->port_name, fcport->loop_id, fcport->d_id.b24, + fcport->logout_on_delete, fcport->keep_nport_handle, + fcport->send_els_logo); + + if (atomic_read(&vha->loop_state) == LOOP_DOWN) + break; + + fcport->edif.app_stop = 1; + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC calling qla_edif_reset_auth_wait\n", + __func__, fcport->port_name); + + fcport->send_els_logo = 1; + qlt_schedule_sess_for_deletion(fcport); + + /* qla_edif_flush_sa_ctl_lists(fcport); */ + fcport->edif.app_started = 0; + } + } + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_OK); + + /* no return interface to app - it assumes we cleaned up ok */ + + return rval; +} + +static int +qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport, + struct app_plogi_reply *appplogireply) +{ + int ret = 0; + + if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n", + __func__, fcport->port_name, fcport->edif.tx_sa_set, + fcport->edif.rx_sa_set); + appplogireply->prli_status = 0; + ret = 1; + } else { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC Both SA(s) updated.\n", __func__, + fcport->port_name); + fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0; + fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0; + appplogireply->prli_status = 1; + } + return ret; +} + +/** + * qla_edif_app_authok - authentication by app succeeded. 
Driver can proceed + * with prli + * @vha: host adapter pointer + * @bsg_job: user request + */ +static int +qla_edif_app_authok(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + struct auth_complete_cmd appplogiok; + struct app_plogi_reply appplogireply = {0}; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + fc_port_t *fcport = NULL; + port_id_t portid = {0}; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appplogiok, + sizeof(struct auth_complete_cmd)); + + switch (appplogiok.type) { + case PL_TYPE_WWPN: + fcport = qla2x00_find_fcport_by_wwpn(vha, + appplogiok.u.wwpn, 0); + if (!fcport) + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s wwpn lookup failed: %8phC\n", + __func__, appplogiok.u.wwpn); + break; + case PL_TYPE_DID: + fcport = qla2x00_find_fcport_by_pid(vha, &appplogiok.u.d_id); + if (!fcport) + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s d_id lookup failed: %x\n", __func__, + portid.b24); + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s undefined type: %x\n", __func__, + appplogiok.type); + break; + } + + if (!fcport) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto errstate_exit; + } + + /* + * if port is online then this is a REKEY operation + * Only do sa update checking + */ + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s Skipping PRLI complete based on rekey\n", __func__); + appplogireply.prli_status = 1; + SET_DID_STATUS(bsg_reply->result, DID_OK); + qla_edif_app_chk_sa_update(vha, fcport, &appplogireply); + goto errstate_exit; + } + + /* make sure in AUTH_PENDING or else reject */ + if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC is not in auth pending state (%x)\n", + __func__, fcport->port_name, fcport->disc_state); + SET_DID_STATUS(bsg_reply->result, DID_OK); + appplogireply.prli_status = 0; + goto errstate_exit; + } + + SET_DID_STATUS(bsg_reply->result, DID_OK); + appplogireply.prli_status = 1; + if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s: wwpn %8phC Both SA indexes has not been SET TX %d, RX %d.\n", + __func__, fcport->port_name, fcport->edif.tx_sa_set, + fcport->edif.rx_sa_set); + SET_DID_STATUS(bsg_reply->result, DID_OK); + appplogireply.prli_status = 0; + goto errstate_exit; + + } else { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC Both SA(s) updated.\n", __func__, + fcport->port_name); + fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0; + fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0; + } + + if (qla_ini_mode_enabled(vha)) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s AUTH complete - RESUME with prli for wwpn %8phC\n", + __func__, fcport->port_name); + qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 1); + qla24xx_post_prli_work(vha, fcport); + } + +errstate_exit: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &appplogireply, + sizeof(struct app_plogi_reply)); + + return rval; +} + +/** + * qla_edif_app_authfail - authentication by app has failed. Driver is given + * notice to tear down current session. 
+ * @vha: host adapter pointer + * @bsg_job: user request + */ +static int +qla_edif_app_authfail(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + struct auth_complete_cmd appplogifail; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + fc_port_t *fcport = NULL; + port_id_t portid = {0}; + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app auth fail\n", __func__); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appplogifail, + sizeof(struct auth_complete_cmd)); + + /* + * TODO: edif: app has failed this plogi. Inform driver to + * take any action (if any). + */ + switch (appplogifail.type) { + case PL_TYPE_WWPN: + fcport = qla2x00_find_fcport_by_wwpn(vha, + appplogifail.u.wwpn, 0); + SET_DID_STATUS(bsg_reply->result, DID_OK); + break; + case PL_TYPE_DID: + fcport = qla2x00_find_fcport_by_pid(vha, &appplogifail.u.d_id); + if (!fcport) + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s d_id lookup failed: %x\n", __func__, + portid.b24); + SET_DID_STATUS(bsg_reply->result, DID_OK); + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s undefined type: %x\n", __func__, + appplogifail.type); + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + rval = -1; + break; + } + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s fcport is 0x%p\n", __func__, fcport); + + if (fcport) { + /* set/reset edif values and flags */ + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n", + __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24); + + if (qla_ini_mode_enabled(fcport->vha)) { + fcport->send_els_logo = 1; + qla_edif_reset_auth_wait(fcport, DSC_LOGIN_PEND, 0); + } + } + + return rval; +} + +/** + * qla_edif_app_getfcinfo - app would like to read session info (wwpn, nportid, + * [initiator|target] mode. It can specific session with specific nport id or + * all sessions. 
+ * @vha: host adapter pointer + * @bsg_job: user request pointer + */ +static int +qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + int32_t num_cnt; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct app_pinfo_req app_req; + struct app_pinfo_reply *app_reply; + port_id_t tdid; + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s app get fcinfo\n", __func__); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &app_req, + sizeof(struct app_pinfo_req)); + + num_cnt = app_req.num_ports; /* num of ports alloc'd by app */ + + app_reply = kzalloc((sizeof(struct app_pinfo_reply) + + sizeof(struct app_pinfo) * num_cnt), GFP_KERNEL); + if (!app_reply) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + rval = -1; + } else { + struct fc_port *fcport = NULL, *tf; + uint32_t pcnt = 0; + + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + if (!(fcport->flags & FCF_FCSP_DEVICE)) + continue; + + tdid = app_req.remote_pid; + + ql_dbg(ql_dbg_edif, vha, 0x2058, + "APP request entry - portid=%06x.\n", tdid.b24); + + /* Ran out of space */ + if (pcnt > app_req.num_ports) + break; + + if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24) + continue; + + app_reply->ports[pcnt].rekey_count = + fcport->edif.rekey_cnt; + + app_reply->ports[pcnt].remote_type = + VND_CMD_RTYPE_UNKNOWN; + if (fcport->port_type & (FCT_NVME_TARGET | FCT_TARGET)) + app_reply->ports[pcnt].remote_type |= + VND_CMD_RTYPE_TARGET; + if (fcport->port_type & (FCT_NVME_INITIATOR | FCT_INITIATOR)) + app_reply->ports[pcnt].remote_type |= + VND_CMD_RTYPE_INITIATOR; + + app_reply->ports[pcnt].remote_pid = fcport->d_id; + + ql_dbg(ql_dbg_edif, vha, 0x2058, + "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x\n", + fcport->node_name, fcport->port_name, pcnt, fcport->d_id.b24); + + switch (fcport->edif.auth_state) { + case VND_CMD_AUTH_STATE_ELS_RCVD: + if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) { + fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED; + app_reply->ports[pcnt].auth_state = + VND_CMD_AUTH_STATE_NEEDED; + } else { + app_reply->ports[pcnt].auth_state = + VND_CMD_AUTH_STATE_ELS_RCVD; + } + break; + default: + app_reply->ports[pcnt].auth_state = fcport->edif.auth_state; + break; + } + + memcpy(app_reply->ports[pcnt].remote_wwpn, + fcport->port_name, 8); + + app_reply->ports[pcnt].remote_state = + (atomic_read(&fcport->state) == + FCS_ONLINE ? 
1 : 0); + + pcnt++; + + if (tdid.b24 != 0) + break; + } + app_reply->port_count = pcnt; + SET_DID_STATUS(bsg_reply->result, DID_OK); + } + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, app_reply, + sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * num_cnt); + + kfree(app_reply); + + return rval; +} + +/** + * qla_edif_app_getstats - app would like to read various statistics info + * @vha: host adapter pointer + * @bsg_job: user request + */ +static int32_t +qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + int32_t rval = 0; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t ret_size, size; + + struct app_sinfo_req app_req; + struct app_stats_reply *app_reply; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &app_req, + sizeof(struct app_sinfo_req)); + if (app_req.num_ports == 0) { + ql_dbg(ql_dbg_async, vha, 0x911d, + "%s app did not indicate number of ports to return\n", + __func__); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + rval = -1; + } + + size = sizeof(struct app_stats_reply) + + (sizeof(struct app_sinfo) * app_req.num_ports); + + if (size > bsg_job->reply_payload.payload_len) + ret_size = bsg_job->reply_payload.payload_len; + else + ret_size = size; + + app_reply = kzalloc(size, GFP_KERNEL); + if (!app_reply) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + rval = -1; + } else { + struct fc_port *fcport = NULL, *tf; + uint32_t pcnt = 0; + + list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) { + if (fcport->edif.enable) { + if (pcnt > app_req.num_ports) + break; + + app_reply->elem[pcnt].rekey_count = + fcport->edif.rekey_cnt; + app_reply->elem[pcnt].tx_bytes = + fcport->edif.tx_bytes; + app_reply->elem[pcnt].rx_bytes = + fcport->edif.rx_bytes; + + memcpy(app_reply->elem[pcnt].remote_wwpn, + fcport->port_name, 8); + + pcnt++; + } + } + app_reply->elem_count = pcnt; + SET_DID_STATUS(bsg_reply->result, DID_OK); + } + + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, app_reply, ret_size); + + kfree(app_reply); + + return rval; +} + +int32_t +qla_edif_app_mgmt(struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct app_id appcheck; + bool done = true; + int32_t rval = 0; + uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s vnd subcmd=%x\n", + __func__, vnd_sc); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appcheck, + sizeof(struct app_id)); + + if (!vha->hw->flags.edif_enabled || + test_bit(VPORT_DELETE, &vha->dpc_flags)) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s edif not enabled or vp delete. bsg ptr done %p. 
dpc_flags %lx\n", + __func__, bsg_job, vha->dpc_flags); + + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + if (!qla_edif_app_check(vha, appcheck)) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s app checked failed.\n", + __func__); + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + switch (vnd_sc) { + case QL_VND_SC_SA_UPDATE: + done = false; + rval = qla24xx_sadb_update(bsg_job); + break; + case QL_VND_SC_APP_START: + rval = qla_edif_app_start(vha, bsg_job); + break; + case QL_VND_SC_APP_STOP: + rval = qla_edif_app_stop(vha, bsg_job); + break; + case QL_VND_SC_AUTH_OK: + rval = qla_edif_app_authok(vha, bsg_job); + break; + case QL_VND_SC_AUTH_FAIL: + rval = qla_edif_app_authfail(vha, bsg_job); + break; + case QL_VND_SC_GET_FCINFO: + rval = qla_edif_app_getfcinfo(vha, bsg_job); + break; + case QL_VND_SC_GET_STATS: + rval = qla_edif_app_getstats(vha, bsg_job); + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x911d, "%s unknown cmd=%x\n", + __func__, + bsg_request->rqst_data.h_vendor.vendor_cmd[1]); + rval = EXT_STATUS_INVALID_PARAM; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + break; + } + +done: + if (done) { + ql_dbg(ql_dbg_user, vha, 0x7009, + "%s: %d bsg ptr done %p\n", __func__, __LINE__, bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + } + + return rval; +} + +static struct edif_sa_ctl * +qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame, + int dir) +{ + struct edif_sa_ctl *sa_ctl; + struct qla_sa_update_frame *sap; + int index = sa_frame->fast_sa_index; + unsigned long flags = 0; + + sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL); + if (!sa_ctl) { + /* couldn't get space */ + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "unable to allocate SA CTL\n"); + return NULL; + } + + /* + * need to allocate sa_index here and save it + * in both sa_ctl->index and sa_frame->fast_sa_index; + * If alloc fails then delete sa_ctl and return NULL + */ + INIT_LIST_HEAD(&sa_ctl->next); + sap = &sa_ctl->sa_frame; + *sap = *sa_frame; + sa_ctl->index = index; + sa_ctl->fcport = fcport; + sa_ctl->flags = 0; + sa_ctl->state = 0L; + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: Added sa_ctl %p, index %d, state 0x%lx\n", + __func__, sa_ctl, sa_ctl->index, sa_ctl->state); + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + if (dir == SAU_FLG_TX) + list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list); + else + list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list); + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); + + return sa_ctl; +} + +void +qla_edif_flush_sa_ctl_lists(fc_port_t *fcport) +{ + struct edif_sa_ctl *sa_ctl, *tsa_ctl; + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + + list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list, + next) { + list_del(&sa_ctl->next); + kfree(sa_ctl); + } + + list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list, + next) { + list_del(&sa_ctl->next); + kfree(sa_ctl); + } + + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); +} + +struct edif_sa_ctl * +qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir) +{ + struct edif_sa_ctl *sa_ctl, *tsa_ctl; + struct list_head *sa_list; + + if (dir == SAU_FLG_TX) + sa_list = &fcport->edif.tx_sa_list; + else + sa_list = &fcport->edif.rx_sa_list; + + list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) { + if 
(test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) && + sa_ctl->index == index) + return sa_ctl; + } + return NULL; +} + +/* add the sa to the correct list */ +static int +qla24xx_check_sadb_avail_slot(struct bsg_job *bsg_job, fc_port_t *fcport, + struct qla_sa_update_frame *sa_frame) +{ + struct edif_sa_ctl *sa_ctl = NULL; + int dir; + uint16_t sa_index; + + dir = (sa_frame->flags & SAU_FLG_TX); + + /* map the spi to an sa_index */ + sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame); + if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) { + /* process rx delete */ + ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, + "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n", + __func__, fcport->loop_id, sa_frame->spi); + + /* build and send the aen */ + fcport->edif.rx_sa_set = 1; + fcport->edif.rx_sa_pending = 0; + qla_edb_eventcreate(fcport->vha, + VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_RX_SA_KEY, fcport); + + /* force a return of good bsg status; */ + return RX_DELETE_NO_EDIF_SA_INDEX; + } else if (sa_index == INVALID_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: Failed to get sa_index for spi 0x%x, dir: %d\n", + __func__, sa_frame->spi, dir); + return INVALID_EDIF_SA_INDEX; + } + + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n", + __func__, sa_index, sa_frame->spi, dir, fcport->loop_id); + + /* This is a local copy of sa_frame. */ + sa_frame->fast_sa_index = sa_index; + /* create the sa_ctl */ + sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir); + if (!sa_ctl) { + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n", + __func__, sa_frame->spi, dir, sa_index); + return -1; + } + + set_bit(EDIF_SA_CTL_USED, &sa_ctl->state); + + if (dir == SAU_FLG_TX) + fcport->edif.tx_rekey_cnt++; + else + fcport->edif.rx_rekey_cnt++; + + ql_dbg(ql_dbg_edif, fcport->vha, 0x9100, + "%s: Found sa_ctl %p, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n", + __func__, sa_ctl, sa_ctl->index, sa_ctl->state, + fcport->edif.tx_rekey_cnt, + fcport->edif.rx_rekey_cnt, fcport->loop_id); + + return 0; +} + +#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0 +#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2 + +int +qla24xx_sadb_update(struct bsg_job *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport = NULL; + srb_t *sp = NULL; + struct edif_list_entry *edif_entry = NULL; + int found = 0; + int rval = 0; + int result = 0; + struct qla_sa_update_frame sa_frame; + struct srb_iocb *iocb_cmd; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x911d, + "%s entered, vha: 0x%p\n", __func__, vha); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &sa_frame, + sizeof(struct qla_sa_update_frame)); + + /* Check if host is online */ + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x70a1, "Host is not online\n"); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + if (vha->e_dbell.db_flags != EDB_ACTIVE) { + ql_log(ql_log_warn, vha, 0x70a1, "App not started\n"); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + fcport = qla2x00_find_fcport_by_pid(vha, &sa_frame.port_id); + if (fcport) { + found = 1; + if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY) + fcport->edif.tx_bytes = 0; + if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY) + 
fcport->edif.rx_bytes = 0; + } + + if (!found) { + ql_dbg(ql_dbg_edif, vha, 0x70a3, "Failed to find port= %06x\n", + sa_frame.port_id.b24); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE); + goto done; + } + + /* make sure the nport_handle is valid */ + if (fcport->loop_id == FC_NO_LOOP_ID) { + ql_dbg(ql_dbg_edif, vha, 0x70e1, + "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n", + __func__, fcport->port_name, sa_frame.spi, + fcport->disc_state); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT); + goto done; + } + + /* allocate and queue an sa_ctl */ + result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame); + + /* failure of bsg */ + if (result == INVALID_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, vha, 0x70e1, + "%s: %8phN, skipping update.\n", + __func__, fcport->port_name); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + + /* rx delete failure */ + } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, vha, 0x70e1, + "%s: %8phN, skipping rx delete.\n", + __func__, fcport->port_name); + SET_DID_STATUS(bsg_reply->result, DID_OK); + goto done; + } + + ql_dbg(ql_dbg_edif, vha, 0x70e1, + "%s: %8phN, sa_index in sa_frame: %d flags %xh\n", + __func__, fcport->port_name, sa_frame.fast_sa_index, + sa_frame.flags); + + /* looking for rx index and delete */ + if (((sa_frame.flags & SAU_FLG_TX) == 0) && + (sa_frame.flags & SAU_FLG_INV)) { + uint16_t nport_handle = fcport->loop_id; + uint16_t sa_index = sa_frame.fast_sa_index; + + /* + * make sure we have an existing rx key, otherwise just process + * this as a straight delete just like TX + * This is NOT a normal case, it indicates an error recovery or key cleanup + * by the ipsec code above us. + */ + edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id); + if (!edif_entry) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing delete for sa_index 0x%x\n", + __func__, fcport->loop_id, sa_index); + goto force_rx_delete; + } + + /* + * if we have a forced delete for rx, remove the sa_index from the edif list + * and proceed with normal delete. 
The rx delay timer should not be running + */ + if ((sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) { + qla_edif_list_delete_sa_index(fcport, edif_entry); + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, forcing DELETE\n", + __func__, fcport->loop_id, sa_index); + kfree(edif_entry); + goto force_rx_delete; + } + + /* + * delayed rx delete + * + * if delete_sa_index is not invalid then there is already + * a delayed index in progress, return bsg bad status + */ + if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { + struct edif_sa_ctl *sa_ctl; + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: delete for lid 0x%x, delete_sa_index %d is pending\n", + __func__, edif_entry->handle, edif_entry->delete_sa_index); + + /* free up the sa_ctl that was allocated with the sa_index */ + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index, + (sa_frame.flags & SAU_FLG_TX)); + if (sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_ctl for index %d\n", + __func__, sa_ctl->index); + qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index); + } + + /* release the sa_index */ + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, sa_index, nport_handle); + qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index); + + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + fcport->edif.rekey_cnt++; + + /* configure and start the rx delay timer */ + edif_entry->fcport = fcport; + edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ; + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n", + __func__, edif_entry, sa_index, nport_handle); + + /* + * Start the timer when we queue the delayed rx delete. + * This is an activity timer that goes off if we have not + * received packets with the new sa_index + */ + add_timer(&edif_entry->timer); + + /* + * sa_delete for rx key with an active rx key including this one + * add the delete rx sa index to the hash so we can look for it + * in the rsp queue. Do this after making any changes to the + * edif_entry as part of the rx delete. + */ + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: delete sa_index %d, lid 0x%x to edif_list. 
bsg done ptr %p\n", + __func__, sa_index, nport_handle, bsg_job); + + edif_entry->delete_sa_index = sa_index; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + + goto done; + + /* + * rx index and update + * add the index to the list and continue with normal update + */ + } else if (((sa_frame.flags & SAU_FLG_TX) == 0) && + ((sa_frame.flags & SAU_FLG_INV) == 0)) { + /* sa_update for rx key */ + uint32_t nport_handle = fcport->loop_id; + uint16_t sa_index = sa_frame.fast_sa_index; + int result; + + /* + * add the update rx sa index to the hash so we can look for it + * in the rsp queue and continue normally + */ + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: adding update sa_index %d, lid 0x%x to edif_list\n", + __func__, sa_index, nport_handle); + + result = qla_edif_list_add_sa_update_index(fcport, sa_index, + nport_handle); + if (result) { + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n", + __func__, sa_index, nport_handle); + } + } + if (sa_frame.flags & SAU_FLG_GMAC_MODE) + fcport->edif.aes_gmac = 1; + else + fcport->edif.aes_gmac = 0; + +force_rx_delete: + /* + * sa_update for both rx and tx keys, sa_delete for tx key + * immediately process the request + */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done; + } + + sp->type = SRB_SA_UPDATE; + sp->name = "bsg_sa_update"; + sp->u.bsg_job = bsg_job; + /* sp->free = qla2x00_bsg_sp_free; */ + sp->free = qla2x00_rel_sp; + sp->done = qla2x00_bsg_job_done; + iocb_cmd = &sp->u.iocb_cmd; + iocb_cmd->u.sa_update.sa_frame = sa_frame; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_dbg_edif, vha, 0x70e3, + "qla2x00_start_sp failed=%d.\n", rval); + + qla2x00_rel_sp(sp); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done; + } + + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: %s sent, hdl=%x, portid=%06x.\n", + __func__, sp->name, sp->handle, fcport->d_id.b24); + + fcport->edif.rekey_cnt++; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_OK); + + return 0; + +/* + * send back error status + */ +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s:status: FAIL, result: 0x%x, bsg ptr done %p\n", + __func__, bsg_reply->result, bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static void +qla_enode_free(scsi_qla_host_t *vha, struct enode *node) +{ + node->ntype = N_UNDEF; + kfree(node); +} + +/** + * qla_enode_init - initialize enode structs & lock + * @vha: host adapter pointer + * + * should only be called when driver attaching + */ +void +qla_enode_init(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + char name[32]; + + if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) { + /* list still active - error */ + ql_dbg(ql_dbg_edif, vha, 0x09102, "%s enode still active\n", + __func__); + return; + } + + /* initialize lock which protects pur_core & init list */ + spin_lock_init(&vha->pur_cinfo.pur_lock); + INIT_LIST_HEAD(&vha->pur_cinfo.head); + + snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME, + ha->pdev->device); +} + +/** + * qla_enode_stop - stop and clear and enode data + * @vha: host adapter pointer + * + * called when app notified it is exiting + */ +void +qla_enode_stop(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct enode *node, 
*q; + + if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s enode not active\n", __func__); + return; + } + + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + + vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */ + + /* hopefully this is a null list at this point */ + list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) { + ql_dbg(ql_dbg_edif, vha, 0x910f, + "%s freeing enode type=%x, cnt=%x\n", __func__, node->ntype, + node->dinfo.nodecnt); + list_del_init(&node->list); + qla_enode_free(vha, node); + } + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); +} + +/* + * allocate enode struct and populate buffer + * returns: enode pointer with buffers + * NULL on error + */ +static struct enode * +qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype) +{ + struct enode *node; + struct purexevent *purex; + + node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC); + if (!node) + return NULL; + + purex = &node->u.purexinfo; + purex->msgp = (u8 *)(node + 1); + purex->msgp_len = ELS_MAX_PAYLOAD; + + node->ntype = ntype; + INIT_LIST_HEAD(&node->list); + return node; +} + +static void +qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr) +{ + unsigned long flags; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x9109, + "%s add enode for type=%x, cnt=%x\n", + __func__, ptr->ntype, ptr->dinfo.nodecnt); + + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + list_add_tail(&ptr->list, &vha->pur_cinfo.head); + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + return; +} + +static struct enode * +qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2) +{ + struct enode *node_rtn = NULL; + struct enode *list_node = NULL; + unsigned long flags; + struct list_head *pos, *q; + uint32_t sid; + uint32_t rw_flag; + struct purexevent *purex; + + /* secure the list from moving under us */ + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + + list_for_each_safe(pos, q, &vha->pur_cinfo.head) { + list_node = list_entry(pos, struct enode, list); + + /* node type determines what p1 and p2 are */ + purex = &list_node->u.purexinfo; + sid = p1; + rw_flag = p2; + + if (purex->pur_info.pur_sid.b24 == sid) { + if (purex->pur_info.pur_pend == 1 && + rw_flag == PUR_GET) { + /* + * if the receive is in progress + * and its a read/get then can't + * transfer yet + */ + ql_dbg(ql_dbg_edif, vha, 0x9106, + "%s purex xfer in progress for sid=%x\n", + __func__, sid); + } else { + /* found it and its complete */ + node_rtn = list_node; + list_del(pos); + break; + } + } + } + + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + return node_rtn; +} + +/** + * qla_pur_get_pending - read/return authentication message sent + * from remote port + * @vha: host adapter pointer + * @fcport: session pointer + * @bsg_job: user request where the message is copy to. 
+ */ +static int +qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport, + struct bsg_job *bsg_job) +{ + struct enode *ptr; + struct purexevent *purex; + struct qla_bsg_auth_els_reply *rpl = + (struct qla_bsg_auth_els_reply *)bsg_job->reply; + + bsg_job->reply_len = sizeof(*rpl); + + ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET); + if (!ptr) { + ql_dbg(ql_dbg_edif, vha, 0x9111, + "%s no enode data found for %8phN sid=%06x\n", + __func__, fcport->port_name, fcport->d_id.b24); + SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY); + return -EIO; + } + + /* + * enode is now off the linked list and is ours to deal with + */ + purex = &ptr->u.purexinfo; + + /* Copy info back to caller */ + rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address; + + SET_DID_STATUS(rpl->r.result, DID_OK); + rpl->r.reply_payload_rcv_len = + sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, purex->msgp, + purex->pur_info.pur_bytes_rcvd, 0); + + /* data copy / passback completed - destroy enode */ + qla_enode_free(vha, ptr); + + return 0; +} + +/* it is assumed the qpair lock is held */ +static int +qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp, + struct qla_els_pt_arg *a) +{ + struct els_entry_24xx *els_iocb; + + els_iocb = __qla2x00_alloc_iocbs(qp, NULL); + if (!els_iocb) { + ql_log(ql_log_warn, vha, 0x700c, + "qla2x00_alloc_iocbs failed.\n"); + return QLA_FUNCTION_FAILED; + } + + qla_els_pt_iocb(vha, els_iocb, a); + + ql_dbg(ql_dbg_edif, vha, 0x0183, + "Sending ELS reject...\n"); + ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x0185, + vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c)); + /* flush iocb to mem before notifying hw doorbell */ + wmb(); + qla2x00_start_iocbs(vha, qp->req); + return 0; +} + +void +qla_edb_init(scsi_qla_host_t *vha) +{ + if (vha->e_dbell.db_flags == EDB_ACTIVE) { + /* list already init'd - error */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "edif db already initialized, cannot reinit\n"); + return; + } + + /* initialize lock which protects doorbell & init list */ + spin_lock_init(&vha->e_dbell.db_lock); + INIT_LIST_HEAD(&vha->e_dbell.head); + + /* create and initialize doorbell */ + init_completion(&vha->e_dbell.dbell); +} + +static void +qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node) +{ + /* + * releases the space held by this edb node entry + * this function does _not_ free the edb node itself + * NB: the edb node entry passed should not be on any list + * + * currently for doorbell there's no additional cleanup + * needed, but here as a placeholder for future use. 
+ */ + + if (!node) { + ql_dbg(ql_dbg_edif, vha, 0x09122, + "%s error - no valid node passed\n", __func__); + return; + } + + node->ntype = N_UNDEF; +} + +/* function called when app is stopping */ + +void +qla_edb_stop(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct edb_node *node, *q; + + if (vha->e_dbell.db_flags != EDB_ACTIVE) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled\n", __func__); + return; + } + + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + + vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */ + /* hopefully this is a null list at this point */ + list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) { + ql_dbg(ql_dbg_edif, vha, 0x910f, + "%s freeing edb_node type=%x\n", + __func__, node->ntype); + qla_edb_node_free(vha, node); + list_del(&node->list); + + kfree(node); + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + /* wake up doorbell waiters - they'll be dismissed with error code */ + complete_all(&vha->e_dbell.dbell); +} + +static struct edb_node * +qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype) +{ + struct edb_node *node; + + node = kzalloc(sizeof(*node), GFP_ATOMIC); + if (!node) { + /* couldn't get space */ + ql_dbg(ql_dbg_edif, vha, 0x9100, + "edb node unable to be allocated\n"); + return NULL; + } + + node->ntype = ntype; + INIT_LIST_HEAD(&node->list); + return node; +} + +/* adds a already allocated enode to the linked list */ +static bool +qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr) +{ + unsigned long flags; + + if (vha->e_dbell.db_flags != EDB_ACTIVE) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled\n", __func__); + return false; + } + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + list_add_tail(&ptr->list, &vha->e_dbell.head); + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + /* ring doorbell for waiters */ + complete(&vha->e_dbell.dbell); + + return true; +} + +/* adds event to doorbell list */ +void +qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, + uint32_t data, uint32_t data2, fc_port_t *sfcport) +{ + struct edb_node *edbnode; + fc_port_t *fcport = sfcport; + port_id_t id; + + if (!vha->hw->flags.edif_enabled) { + /* edif not enabled */ + return; + } + + if (vha->e_dbell.db_flags != EDB_ACTIVE) { + if (fcport) + fcport->edif.auth_state = dbtype; + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s doorbell not enabled (type=%d\n", __func__, dbtype); + return; + } + + edbnode = qla_edb_node_alloc(vha, dbtype); + if (!edbnode) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s unable to alloc db node\n", __func__); + return; + } + + if (!fcport) { + id.b.domain = (data >> 16) & 0xff; + id.b.area = (data >> 8) & 0xff; + id.b.al_pa = data & 0xff; + ql_dbg(ql_dbg_edif, vha, 0x09222, + "%s: Arrived s_id: %06x\n", __func__, + id.b24); + fcport = qla2x00_find_fcport_by_pid(vha, &id); + if (!fcport) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s can't find fcport for sid= 0x%x - ignoring\n", + __func__, id.b24); + kfree(edbnode); + return; + } + } + + /* populate the edb node */ + switch (dbtype) { + case VND_CMD_AUTH_STATE_NEEDED: + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + edbnode->u.plogi_did.b24 = fcport->d_id.b24; + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + edbnode->u.els_sid.b24 = fcport->d_id.b24; + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + edbnode->u.sa_aen.port_id = fcport->d_id; + 
edbnode->u.sa_aen.status = data; + edbnode->u.sa_aen.key_type = data2; + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s unknown type: %x\n", __func__, dbtype); + qla_edb_node_free(vha, edbnode); + kfree(edbnode); + edbnode = NULL; + break; + } + + if (edbnode && (!qla_edb_node_add(vha, edbnode))) { + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s unable to add dbnode\n", __func__); + qla_edb_node_free(vha, edbnode); + kfree(edbnode); + return; + } + if (edbnode && fcport) + fcport->edif.auth_state = dbtype; + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode); +} + +static struct edb_node * +qla_edb_getnext(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct edb_node *edbnode = NULL; + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + + /* db nodes are fifo - no qualifications done */ + if (!list_empty(&vha->e_dbell.head)) { + edbnode = list_first_entry(&vha->e_dbell.head, + struct edb_node, list); + list_del(&edbnode->list); + } + + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + return edbnode; +} + +/* + * app uses separate thread to read this. It'll wait until the doorbell + * is rung by the driver or the max wait time has expired + */ +ssize_t +edif_doorbell_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct edb_node *dbnode = NULL; + struct edif_app_dbell *ap = (struct edif_app_dbell *)buf; + uint32_t dat_siz, buf_size, sz; + + /* TODO: app currently hardcoded to 256. Will transition to bsg */ + sz = 256; + + /* stop new threads from waiting if we're not init'd */ + if (vha->e_dbell.db_flags != EDB_ACTIVE) { + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122, + "%s error - edif db not enabled\n", __func__); + return 0; + } + + if (!vha->hw->flags.edif_enabled) { + /* edif not enabled */ + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x09122, + "%s error - edif not enabled\n", __func__); + return -1; + } + + buf_size = 0; + while ((sz - buf_size) >= sizeof(struct edb_node)) { + /* remove the next item from the doorbell list */ + dat_siz = 0; + dbnode = qla_edb_getnext(vha); + if (dbnode) { + ap->event_code = dbnode->ntype; + switch (dbnode->ntype) { + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + case VND_CMD_AUTH_STATE_NEEDED: + ap->port_id = dbnode->u.plogi_did; + dat_siz += sizeof(ap->port_id); + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + ap->port_id = dbnode->u.els_sid; + dat_siz += sizeof(ap->port_id); + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + ap->port_id = dbnode->u.sa_aen.port_id; + memcpy(ap->event_data, &dbnode->u, + sizeof(struct edif_sa_update_aen)); + dat_siz += sizeof(struct edif_sa_update_aen); + break; + default: + /* unknown node type, rtn unknown ntype */ + ap->event_code = VND_CMD_AUTH_STATE_UNDEF; + memcpy(ap->event_data, &dbnode->ntype, 4); + dat_siz += 4; + break; + } + + ql_dbg(ql_dbg_edif, vha, 0x09102, + "%s Doorbell consumed : type=%d %p\n", + __func__, dbnode->ntype, dbnode); + /* we're done with the db node, so free it up */ + qla_edb_node_free(vha, dbnode); + kfree(dbnode); + } else { + break; + } + + ap->event_data_size = dat_siz; + /* 8bytes = ap->event_code + ap->event_data_size */ + buf_size += dat_siz + 8; + ap = (struct edif_app_dbell *)(buf + buf_size); + } + return buf_size; +} + +static void qla_noop_sp_done(srb_t *sp, int res) +{ + sp->free(sp); +} + +/* + * Called from work queue + * build and send the sa_update iocb to delete an rx sa_index + */ +int 
+qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e) +{ + srb_t *sp; + fc_port_t *fcport = NULL; + struct srb_iocb *iocb_cmd = NULL; + int rval = QLA_SUCCESS; + struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl; + uint16_t nport_handle = e->u.sa_update.nport_handle; + + ql_dbg(ql_dbg_edif, vha, 0x70e6, + "%s: starting, sa_ctl: %p\n", __func__, sa_ctl); + + if (!sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x70e6, + "sa_ctl allocation failed\n"); + return -ENOMEM; + } + + fcport = sa_ctl->fcport; + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_dbg(ql_dbg_edif, vha, 0x70e6, + "SRB allocation failed\n"); + return -ENOMEM; + } + + fcport->flags |= FCF_ASYNC_SENT; + iocb_cmd = &sp->u.iocb_cmd; + iocb_cmd->u.sa_update.sa_ctl = sa_ctl; + + ql_dbg(ql_dbg_edif, vha, 0x3073, + "Enter: SA REPL portid=%06x, sa_ctl %p, index %x, nport_handle: 0x%x\n", + fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle); + /* + * if this is a sadb cleanup delete, mark it so the isr can + * take the correct action + */ + if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) { + /* mark this srb as a cleanup delete */ + sp->flags |= SRB_EDIF_CLEANUP_DELETE; + ql_dbg(ql_dbg_edif, vha, 0x70e6, + "%s: sp 0x%p flagged as cleanup delete\n", __func__, sp); + } + + sp->type = SRB_SA_REPLACE; + sp->name = "SA_REPLACE"; + sp->fcport = fcport; + sp->free = qla2x00_rel_sp; + sp->done = qla_noop_sp_done; + + rval = qla2x00_start_sp(sp); + + if (rval != QLA_SUCCESS) + rval = QLA_FUNCTION_FAILED; + + return rval; +} + +void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb) +{ + int itr = 0; + struct scsi_qla_host *vha = sp->vha; + struct qla_sa_update_frame *sa_frame = + &sp->u.iocb_cmd.u.sa_update.sa_frame; + u8 flags = 0; + + switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) { + case 0: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + break; + case 1: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_INVALIDATE; + break; + case 2: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_TX; + break; + case 3: + ql_dbg(ql_dbg_edif, vha, 0x911d, + "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE; + break; + } + + sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE; + sa_update_iocb->entry_count = 1; + sa_update_iocb->sys_define = 0; + sa_update_iocb->entry_status = 0; + sa_update_iocb->handle = sp->handle; + sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id); + sa_update_iocb->vp_index = sp->fcport->vha->vp_idx; + sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area; + sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain; + + sa_update_iocb->flags = flags; + sa_update_iocb->salt = cpu_to_le32(sa_frame->salt); + sa_update_iocb->spi = cpu_to_le32(sa_frame->spi); + sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index); + + sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP; + if (sp->fcport->edif.aes_gmac) + sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC; + + if (sa_frame->flags & SAU_FLG_KEY256) { + sa_update_iocb->sa_control |= SA_CNTL_KEY256; + for (itr = 0; itr < 32; itr++) + sa_update_iocb->sa_key[itr] = 
sa_frame->sa_key[itr]; + } else { + sa_update_iocb->sa_control |= SA_CNTL_KEY128; + for (itr = 0; itr < 16; itr++) + sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr]; + } + + ql_dbg(ql_dbg_edif, vha, 0x921d, + "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x hdl=%x gmac %d\n", + __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1], + sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index, + sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle, + sp->fcport->edif.aes_gmac); + + if (sa_frame->flags & SAU_FLG_TX) + sp->fcport->edif.tx_sa_pending = 1; + else + sp->fcport->edif.rx_sa_pending = 1; + + sp->fcport->vha->qla_stats.control_requests++; +} + +void +qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb) +{ + struct scsi_qla_host *vha = sp->vha; + struct srb_iocb *srb_iocb = &sp->u.iocb_cmd; + struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl; + uint16_t nport_handle = sp->fcport->loop_id; + + sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE; + sa_update_iocb->entry_count = 1; + sa_update_iocb->sys_define = 0; + sa_update_iocb->entry_status = 0; + sa_update_iocb->handle = sp->handle; + + sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle); + + sa_update_iocb->vp_index = sp->fcport->vha->vp_idx; + sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area; + sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain; + + /* Invalidate the index. salt, spi, control & key are ignore */ + sa_update_iocb->flags = SA_FLAG_INVALIDATE; + sa_update_iocb->salt = 0; + sa_update_iocb->spi = 0; + sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index); + sa_update_iocb->sa_control = 0; + + ql_dbg(ql_dbg_edif, vha, 0x921d, + "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n", + __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1], + sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags, + sa_update_iocb->sa_index, sp->handle); + + sp->fcport->vha->qla_stats.control_requests++; +} + +void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) +{ + struct purex_entry_24xx *p = *pkt; + struct enode *ptr; + int sid; + u16 totlen; + struct purexevent *purex; + struct scsi_qla_host *host = NULL; + int rc; + struct fc_port *fcport; + struct qla_els_pt_arg a; + be_id_t beid; + + memset(&a, 0, sizeof(a)); + + a.els_opcode = ELS_AUTH_ELS; + a.nport_handle = p->nport_handle; + a.rx_xchg_address = p->rx_xchg_addr; + a.did.b.domain = p->s_id[2]; + a.did.b.area = p->s_id[1]; + a.did.b.al_pa = p->s_id[0]; + a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt); + a.tx_addr = vha->hw->elsrej.cdma; + a.vp_idx = vha->vp_idx; + a.control_flags = EPD_ELS_RJT; + + sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16); + + totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE; + if (le16_to_cpu(p->status_flags) & 0x8000) { + totlen = le16_to_cpu(p->trunc_frame_size); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + if (totlen > MAX_PAYLOAD) { + ql_dbg(ql_dbg_edif, vha, 0x0910d, + "%s WARNING: verbose ELS frame received (totlen=%x)\n", + __func__, totlen); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + if (!vha->hw->flags.edif_enabled) { + /* edif support not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x910e, "%s edif not enabled\n", + __func__); + 
qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + ptr = qla_enode_alloc(vha, N_PUREX); + if (!ptr) { + ql_dbg(ql_dbg_edif, vha, 0x09109, + "WARNING: enode alloc failed for sid=%x\n", + sid); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + purex = &ptr->u.purexinfo; + purex->pur_info.pur_sid = a.did; + purex->pur_info.pur_pend = 0; + purex->pur_info.pur_bytes_rcvd = totlen; + purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr); + purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle); + purex->pur_info.pur_did.b.domain = p->d_id[2]; + purex->pur_info.pur_did.b.area = p->d_id[1]; + purex->pur_info.pur_did.b.al_pa = p->d_id[0]; + purex->pur_info.vp_idx = p->vp_idx; + + rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp, + purex->msgp_len); + if (rc) { + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + qla_enode_free(vha, ptr); + return; + } + beid.al_pa = purex->pur_info.pur_did.b.al_pa; + beid.area = purex->pur_info.pur_did.b.area; + beid.domain = purex->pur_info.pur_did.b.domain; + host = qla_find_host_by_d_id(vha, beid); + if (!host) { + ql_log(ql_log_fatal, vha, 0x508b, + "%s Drop ELS due to unable to find host %06x\n", + __func__, purex->pur_info.pur_did.b24); + + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + qla_enode_free(vha, ptr); + return; + } + + fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid); + + if (host->e_dbell.db_flags != EDB_ACTIVE || + (fcport && fcport->loop_id == FC_NO_LOOP_ID)) { + ql_dbg(ql_dbg_edif, host, 0x0910c, "%s e_dbell.db_flags =%x %06x\n", + __func__, host->e_dbell.db_flags, + fcport ? fcport->d_id.b24 : 0); + + qla_els_reject_iocb(host, (*rsp)->qpair, &a); + qla_enode_free(host, ptr); + return; + } + + /* add the local enode to the list */ + qla_enode_add(host, ptr); + + ql_dbg(ql_dbg_edif, host, 0x0910c, + "%s COMPLETE purex->pur_info.pur_bytes_rcvd =%xh s:%06x -> d:%06x xchg=%xh\n", + __func__, purex->pur_info.pur_bytes_rcvd, purex->pur_info.pur_sid.b24, + purex->pur_info.pur_did.b24, p->rx_xchg_addr); + + qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL); +} + +static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + void *sa_id_map; + unsigned long flags = 0; + u16 sa_index; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "%s: entry\n", __func__); + + if (dir) + sa_id_map = ha->edif_tx_sa_id_map; + else + sa_id_map = ha->edif_rx_sa_id_map; + + spin_lock_irqsave(&ha->sadb_fp_lock, flags); + sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX); + if (sa_index >= EDIF_NUM_SA_INDEX) { + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + return INVALID_EDIF_SA_INDEX; + } + set_bit(sa_index, sa_id_map); + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + + if (dir) + sa_index += EDIF_TX_SA_INDEX_BASE; + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: index retrieved from free pool %d\n", __func__, sa_index); + + return sa_index; +} + +/* find an sadb entry for an nport_handle */ +static struct edif_sa_index_entry * +qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle, + struct list_head *sa_list) +{ + struct edif_sa_index_entry *entry; + struct edif_sa_index_entry *tentry; + struct list_head *indx_list = sa_list; + + list_for_each_entry_safe(entry, tentry, indx_list, next) { + if (entry->handle == nport_handle) + return entry; + } + return NULL; +} + +/* remove an sa_index 
from the nport_handle and return it to the free pool */ +static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, + uint16_t sa_index) +{ + struct edif_sa_index_entry *entry; + struct list_head *sa_list; + int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1; + int slot = 0; + int free_slot_count = 0; + scsi_qla_host_t *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: entry\n", __func__); + + if (dir) + sa_list = &ha->sadb_tx_index_list; + else + sa_list = &ha->sadb_rx_index_list; + + entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list); + if (!entry) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: no entry found for nport_handle 0x%x\n", + __func__, nport_handle); + return -1; + } + + spin_lock_irqsave(&ha->sadb_lock, flags); + /* + * each tx/rx direction has up to 2 sa indexes/slots. 1 slot for in flight traffic + * the other is use at re-key time. + */ + for (slot = 0; slot < 2; slot++) { + if (entry->sa_pair[slot].sa_index == sa_index) { + entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX; + entry->sa_pair[slot].spi = 0; + free_slot_count++; + qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index); + } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) { + free_slot_count++; + } + } + + if (free_slot_count == 2) { + list_del(&entry->next); + kfree(entry); + } + spin_unlock_irqrestore(&ha->sadb_lock, flags); + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_index %d removed, free_slot_count: %d\n", + __func__, sa_index, free_slot_count); + + return 0; +} + +void +qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req, + struct sa_update_28xx *pkt) +{ + const char *func = "SA_UPDATE_RESPONSE_IOCB"; + srb_t *sp; + struct edif_sa_ctl *sa_ctl; + int old_sa_deleted = 1; + uint16_t nport_handle; + struct scsi_qla_host *vha; + + sp = qla2x00_get_sp_from_handle(v, func, req, pkt); + + if (!sp) { + ql_dbg(ql_dbg_edif, v, 0x3063, + "%s: no sp found for pkt\n", __func__); + return; + } + /* use sp->vha due to npiv */ + vha = sp->vha; + + switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) { + case 0: + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: EDIF SA UPDATE RX IOCB vha: 0x%p index: %d\n", + __func__, vha, pkt->sa_index); + break; + case 1: + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: EDIF SA DELETE RX IOCB vha: 0x%p index: %d\n", + __func__, vha, pkt->sa_index); + break; + case 2: + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: EDIF SA UPDATE TX IOCB vha: 0x%p index: %d\n", + __func__, vha, pkt->sa_index); + break; + case 3: + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: EDIF SA DELETE TX IOCB vha: 0x%p index: %d\n", + __func__, vha, pkt->sa_index); + break; + } + + /* + * dig the nport handle out of the iocb, fcport->loop_id can not be trusted + * to be correct during cleanup sa_update iocbs. 
+ */ + nport_handle = sp->fcport->loop_id; + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: %8phN comp status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x pkt_flags %xh hdl=%x\n", + __func__, sp->fcport->port_name, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info, + nport_handle, pkt->sa_index, pkt->flags, sp->handle); + + /* if rx delete, remove the timer */ + if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) { + struct edif_list_entry *edif_entry; + + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle); + if (edif_entry) { + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: removing edif_entry %p, new sa_index: 0x%x\n", + __func__, edif_entry, pkt->sa_index); + qla_edif_list_delete_sa_index(sp->fcport, edif_entry); + del_timer(&edif_entry->timer); + + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: releasing edif_entry %p, new sa_index: 0x%x\n", + __func__, edif_entry, pkt->sa_index); + + kfree(edif_entry); + } + } + + /* + * if this is a delete for either tx or rx, make sure it succeeded. + * The new_sa_info field should be 0xffff on success + */ + if (pkt->flags & SA_FLAG_INVALIDATE) + old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 1 : 0; + + /* Process update and delete the same way */ + + /* If this is an sadb cleanup delete, bypass sending events to IPSEC */ + if (sp->flags & SRB_EDIF_CLEANUP_DELETE) { + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: nph 0x%x, sa_index %d removed from fw\n", + __func__, sp->fcport->loop_id, pkt->sa_index); + + } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) && + old_sa_deleted) { + /* + * Note: We are only keeping track of latest SA, + * so we know when we can start enabling encryption per I/O. + * If all SA's get deleted, let FW reject the IOCB.
+ + * TODO: edif: don't set enabled here I think + * TODO: edif: prli complete is where it should be set + */ + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "SA(%x)updated for s_id %02x%02x%02x\n", + pkt->new_sa_info, + pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]); + sp->fcport->edif.enable = 1; + if (pkt->flags & SA_FLAG_TX) { + sp->fcport->edif.tx_sa_set = 1; + sp->fcport->edif.tx_sa_pending = 0; + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_TX_SA_KEY, sp->fcport); + } else { + sp->fcport->edif.rx_sa_set = 1; + sp->fcport->edif.rx_sa_pending = 0; + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_RX_SA_KEY, sp->fcport); + } + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n", + __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info, + pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]); + + if (pkt->flags & SA_FLAG_TX) + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED, + QL_VND_TX_SA_KEY, sp->fcport); + else + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED, + QL_VND_RX_SA_KEY, sp->fcport); + } + + /* for delete, release sa_ctl, sa_index */ + if (pkt->flags & SA_FLAG_INVALIDATE) { + /* release the sa_ctl */ + sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport, + le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX)); + if (sa_ctl && + qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index, + (pkt->flags & SA_FLAG_TX)) != NULL) { + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x3063, + "%s: freeing sa_ctl for index %d\n", + __func__, sa_ctl->index); + qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index); + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sa_ctl NOT freed, sa_ctl: %p\n", + __func__, sa_ctl); + } + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, le16_to_cpu(pkt->sa_index), nport_handle); + qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle, + le16_to_cpu(pkt->sa_index)); + /* + * check for a failed sa_update and remove + * the sadb entry. + */ + } else if (pkt->u.comp_sts) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, pkt->sa_index, nport_handle); + qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle, + le16_to_cpu(pkt->sa_index)); + } + + sp->done(sp, 0); +} + +/** + * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP + * @sp: command to send to the ISP + * + * Return: non-zero if a failure occurred, else zero. + */ +int +qla28xx_start_scsi_edif(srb_t *sp) +{ + int nseg; + unsigned long flags; + struct scsi_cmnd *cmd; + uint32_t *clr_ptr; + uint32_t index, i; + uint32_t handle; + uint16_t cnt; + int16_t req_cnt; + uint16_t tot_dsds; + __be32 *fcp_dl; + uint8_t additional_cdb_len; + struct ct6_dsd *ctx; + struct scsi_qla_host *vha = sp->vha; + struct qla_hw_data *ha = vha->hw; + struct cmd_type_6 *cmd_pkt; + struct dsd64 *cur_dsd; + uint8_t avail_dsds = 0; + struct scatterlist *sg; + struct req_que *req = sp->qpair->req; + spinlock_t *lock = sp->qpair->qp_lock_ptr; + + /* Setup device pointers. 
*/ + cmd = GET_CMD_SP(sp); + + /* So we know we haven't pci_map'ed anything yet */ + tot_dsds = 0; + + /* Send marker if required */ + if (vha->marker_needed != 0) { + if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) != + QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x300c, + "qla2x00_marker failed for cmd=%p.\n", cmd); + return QLA_FUNCTION_FAILED; + } + vha->marker_needed = 0; + } + + /* Acquire ring specific lock */ + spin_lock_irqsave(lock, flags); + + /* Check for room in outstanding command list. */ + handle = req->current_outstanding_cmd; + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + break; + } + if (index == req->num_outstanding_cmds) + goto queuing_error; + + /* Map the sg table so we have an accurate count of sg entries needed */ + if (scsi_sg_count(cmd)) { + nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd), + scsi_sg_count(cmd), cmd->sc_data_direction); + if (unlikely(!nseg)) + goto queuing_error; + } else { + nseg = 0; + } + + tot_dsds = nseg; + req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + if (req->cnt < (req_cnt + 2)) { + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + rd_reg_dword(req->req_q_out); + if (req->ring_index < cnt) + req->cnt = cnt - req->ring_index; + else + req->cnt = req->length - + (req->ring_index - cnt); + if (req->cnt < (req_cnt + 2)) + goto queuing_error; + } + + ctx = sp->u.scmd.ct6_ctx = + mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); + if (!ctx) { + ql_log(ql_log_fatal, vha, 0x3010, + "Failed to allocate ctx for cmd=%p.\n", cmd); + goto queuing_error; + } + + memset(ctx, 0, sizeof(struct ct6_dsd)); + ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, + GFP_ATOMIC, &ctx->fcp_cmnd_dma); + if (!ctx->fcp_cmnd) { + ql_log(ql_log_fatal, vha, 0x3011, + "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); + goto queuing_error; + } + + /* Initialize the DSD list and dma handle */ + INIT_LIST_HEAD(&ctx->dsd_list); + ctx->dsd_use_cnt = 0; + + if (cmd->cmd_len > 16) { + additional_cdb_len = cmd->cmd_len - 16; + if ((cmd->cmd_len % 4) != 0) { + /* + * SCSI command bigger than 16 bytes must be + * multiple of 4 + */ + ql_log(ql_log_warn, vha, 0x3012, + "scsi cmd len %d not multiple of 4 for cmd=%p.\n", + cmd->cmd_len, cmd); + goto queuing_error_fcp_cmnd; + } + ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; + } else { + additional_cdb_len = 0; + ctx->fcp_cmnd_len = 12 + 16 + 4; + } + + cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; + cmd_pkt->handle = make_handle(req->id, handle); + + /* + * Zero out remaining portion of packet. + * tagged queuing modifier -- default is TSK_SIMPLE (0). 
+ */ + clr_ptr = (uint32_t *)cmd_pkt + 2; + memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); + cmd_pkt->dseg_count = cpu_to_le16(tot_dsds); + + /* No data transfer */ + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + cmd_pkt->byte_count = cpu_to_le32(0); + goto no_dsds; + } + + /* Set transfer direction */ + if (cmd->sc_data_direction == DMA_TO_DEVICE) { + cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); + vha->qla_stats.output_bytes += scsi_bufflen(cmd); + vha->qla_stats.output_requests++; + sp->fcport->edif.tx_bytes += scsi_bufflen(cmd); + } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { + cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); + vha->qla_stats.input_bytes += scsi_bufflen(cmd); + vha->qla_stats.input_requests++; + sp->fcport->edif.rx_bytes += scsi_bufflen(cmd); + } + + cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); + cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA)); + + /* One DSD is available in the Command Type 6 IOCB */ + avail_dsds = 1; + cur_dsd = &cmd_pkt->fcp_dsd; + + /* Load data segments */ + scsi_for_each_sg(cmd, sg, tot_dsds, i) { + dma_addr_t sle_dma; + cont_a64_entry_t *cont_pkt; + + /* Allocate additional continuation packets? */ + if (avail_dsds == 0) { + /* + * Five DSDs are available in the Continuation + * Type 1 IOCB. + */ + cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req); + cur_dsd = cont_pkt->dsd; + avail_dsds = 5; + } + + sle_dma = sg_dma_address(sg); + put_unaligned_le64(sle_dma, &cur_dsd->address); + cur_dsd->length = cpu_to_le32(sg_dma_len(sg)); + cur_dsd++; + avail_dsds--; + } + +no_dsds: + /* Set NPORT-ID and LUN number*/ + cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); + cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; + cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; + cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; + cmd_pkt->vp_index = sp->vha->vp_idx; + + cmd_pkt->entry_type = COMMAND_TYPE_6; + + /* Set total data segment count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + + int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + + /* build FCP_CMND IU */ + int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun); + ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 1; + else if (cmd->sc_data_direction == DMA_FROM_DEVICE) + ctx->fcp_cmnd->additional_cdb_len |= 2; + + /* Populate the FCP_PRIO. */ + if (ha->flags.fcp_prio_enabled) + ctx->fcp_cmnd->task_attribute |= + sp->fcport->fcp_prio << 3; + + memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); + + fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + + additional_cdb_len); + *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); + + cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len); + put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address); + + sp->flags |= SRB_FCP_CMND_DMA_VALID; + cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd)); + /* Set total data segment count. */ + cmd_pkt->entry_count = (uint8_t)req_cnt; + cmd_pkt->entry_status = 0; + + /* Build command packet. */ + req->current_outstanding_cmd = handle; + req->outstanding_cmds[handle] = sp; + sp->handle = handle; + cmd->host_scribble = (unsigned char *)(unsigned long)handle; + req->cnt -= req_cnt; + + /* Adjust ring index. 
*/ + wmb(); + req->ring_index++; + if (req->ring_index == req->length) { + req->ring_index = 0; + req->ring_ptr = req->ring; + } else { + req->ring_ptr++; + } + + sp->qpair->cmd_cnt++; + /* Set chip new ring index. */ + wrt_reg_dword(req->req_q_in, req->ring_index); + + spin_unlock_irqrestore(lock, flags); + + return QLA_SUCCESS; + +queuing_error_fcp_cmnd: + dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma); +queuing_error: + if (tot_dsds) + scsi_dma_unmap(cmd); + + if (sp->u.scmd.ct6_ctx) { + mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool); + sp->u.scmd.ct6_ctx = NULL; + } + spin_unlock_irqrestore(lock, flags); + + return QLA_FUNCTION_FAILED; +} + +/********************************************** + * edif update/delete sa_index list functions * + **********************************************/ + +/* clear the edif_indx_list for this port */ +void qla_edif_list_del(fc_port_t *fcport) +{ + struct edif_list_entry *indx_lst; + struct edif_list_entry *tindx_lst; + struct list_head *indx_list = &fcport->edif.edif_indx_list; + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) { + list_del(&indx_lst->next); + kfree(indx_lst); + } + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); +} + +/****************** + * SADB functions * + ******************/ + +/* allocate/retrieve an sa_index for a given spi */ +static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport, + struct qla_sa_update_frame *sa_frame) +{ + struct edif_sa_index_entry *entry; + struct list_head *sa_list; + uint16_t sa_index; + int dir = sa_frame->flags & SAU_FLG_TX; + int slot = 0; + int free_slot = -1; + scsi_qla_host_t *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + uint16_t nport_handle = fcport->loop_id; + + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: entry fc_port: %p, nport_handle: 0x%x\n", + __func__, fcport, nport_handle); + + if (dir) + sa_list = &ha->sadb_tx_index_list; + else + sa_list = &ha->sadb_rx_index_list; + + entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list); + if (!entry) { + if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: rx delete request with no entry\n", __func__); + return RX_DELETE_NO_EDIF_SA_INDEX; + } + + /* if there is no entry for this nport, add one */ + entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC); + if (!entry) + return INVALID_EDIF_SA_INDEX; + + sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir); + if (sa_index == INVALID_EDIF_SA_INDEX) { + kfree(entry); + return INVALID_EDIF_SA_INDEX; + } + + INIT_LIST_HEAD(&entry->next); + entry->handle = nport_handle; + entry->fcport = fcport; + entry->sa_pair[0].spi = sa_frame->spi; + entry->sa_pair[0].sa_index = sa_index; + entry->sa_pair[1].spi = 0; + entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX; + spin_lock_irqsave(&ha->sadb_lock, flags); + list_add_tail(&entry->next, sa_list); + spin_unlock_irqrestore(&ha->sadb_lock, flags); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning sa_index %d\n", + __func__, nport_handle, sa_frame->spi, sa_index); + + return sa_index; + } + + spin_lock_irqsave(&ha->sadb_lock, flags); + + /* see if we already have an entry for this spi */ + for (slot = 0; slot < 2; slot++) { + if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) { + free_slot = slot; + } else { + if (entry->sa_pair[slot].spi 
== sa_frame->spi) { + spin_unlock_irqrestore(&ha->sadb_lock, flags); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, sa_index %d\n", + __func__, slot, entry->handle, sa_frame->spi, + entry->sa_pair[slot].sa_index); + return entry->sa_pair[slot].sa_index; + } + } + } + spin_unlock_irqrestore(&ha->sadb_lock, flags); + + /* both slots are used */ + if (free_slot == -1) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n", + __func__, entry->handle, sa_frame->spi); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n", + __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index, + entry->sa_pair[1].spi, entry->sa_pair[1].sa_index); + + return INVALID_EDIF_SA_INDEX; + } + + /* there is at least one free slot, use it */ + sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir); + if (sa_index == INVALID_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, + "%s: empty freepool!!\n", __func__); + return INVALID_EDIF_SA_INDEX; + } + + spin_lock_irqsave(&ha->sadb_lock, flags); + entry->sa_pair[free_slot].spi = sa_frame->spi; + entry->sa_pair[free_slot].sa_index = sa_index; + spin_unlock_irqrestore(&ha->sadb_lock, flags); + ql_dbg(ql_dbg_edif, fcport->vha, 0x3063, + "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n", + __func__, free_slot, entry->handle, sa_frame->spi, sa_index); + + return sa_index; +} + +/* release any sadb entries -- only done at teardown */ +void qla_edif_sadb_release(struct qla_hw_data *ha) +{ + struct list_head *pos; + struct list_head *tmp; + struct edif_sa_index_entry *entry; + + list_for_each_safe(pos, tmp, &ha->sadb_rx_index_list) { + entry = list_entry(pos, struct edif_sa_index_entry, next); + list_del(&entry->next); + kfree(entry); + } + + list_for_each_safe(pos, tmp, &ha->sadb_tx_index_list) { + entry = list_entry(pos, struct edif_sa_index_entry, next); + list_del(&entry->next); + kfree(entry); + } +} + +/************************** + * sadb freepool functions + **************************/ + +/* build the rx and tx sa_index free pools -- only done at fcport init */ +int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha) +{ + ha->edif_tx_sa_id_map = + kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL); + + if (!ha->edif_tx_sa_id_map) { + ql_log_pci(ql_log_fatal, ha->pdev, 0x0009, + "Unable to allocate memory for sadb tx.\n"); + return -ENOMEM; + } + + ha->edif_rx_sa_id_map = + kcalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX), sizeof(long), GFP_KERNEL); + if (!ha->edif_rx_sa_id_map) { + kfree(ha->edif_tx_sa_id_map); + ha->edif_tx_sa_id_map = NULL; + ql_log_pci(ql_log_fatal, ha->pdev, 0x0009, + "Unable to allocate memory for sadb rx.\n"); + return -ENOMEM; + } + return 0; +} + +/* release the free pool - only done during fcport teardown */ +void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha) +{ + kfree(ha->edif_tx_sa_id_map); + ha->edif_tx_sa_id_map = NULL; + kfree(ha->edif_rx_sa_id_map); + ha->edif_rx_sa_id_map = NULL; +} + +static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, + fc_port_t *fcport, uint32_t handle, uint16_t sa_index) +{ + struct edif_list_entry *edif_entry; + struct edif_sa_ctl *sa_ctl; + uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX; + unsigned long flags = 0; + uint16_t nport_handle = fcport->loop_id; + uint16_t cached_nport_handle; + + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + edif_entry = 
qla_edif_list_find_sa_index(fcport, nport_handle); + if (!edif_entry) { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + return; /* no pending delete for this handle */ + } + + /* + * check for no pending delete for this index or iocb does not + * match rx sa_index + */ + if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX || + edif_entry->update_sa_index != sa_index) { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + return; + } + + /* + * wait until we have seen at least EDIF_RX_DELETE_FILTER_COUNT transfers before + * queueing RX delete + */ + if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + return; + } + + ql_dbg(ql_dbg_edif, vha, 0x5033, + "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, delete_sa_index: 0x%x\n", + __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index); + + delete_sa_index = edif_entry->delete_sa_index; + edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX; + cached_nport_handle = edif_entry->handle; + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + + /* sanity check on the nport handle */ + if (nport_handle != cached_nport_handle) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n", + __func__, nport_handle, cached_nport_handle); + } + + /* find the sa_ctl for the delete and schedule the delete */ + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0); + if (sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n", + __func__, sa_ctl, sa_index); + ql_dbg(ql_dbg_edif, vha, 0x3063, + "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n", + delete_sa_index, + edif_entry->update_sa_index, nport_handle, handle); + + sa_ctl->flags = EDIF_SA_CTL_FLG_DEL; + set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state); + qla_post_sa_replace_work(fcport->vha, fcport, + nport_handle, sa_ctl); + } else { + ql_dbg(ql_dbg_edif, vha, 0x3063, + "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n", + __func__, delete_sa_index); + } +} + +void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, + srb_t *sp, struct sts_entry_24xx *sts24) +{ + fc_port_t *fcport = sp->fcport; + /* sa_index used by this iocb */ + struct scsi_cmnd *cmd = GET_CMD_SP(sp); + uint32_t handle; + + handle = (uint32_t)LSW(sts24->handle); + + /* find out if this status iosb is for a scsi read */ + if (cmd->sc_data_direction != DMA_FROM_DEVICE) + return; + + return __chk_edif_rx_sa_delete_pending(vha, fcport, handle, + le16_to_cpu(sts24->edif_sa_index)); +} + +void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport, + struct ctio7_from_24xx *pkt) +{ + __chk_edif_rx_sa_delete_pending(vha, fcport, + pkt->handle, le16_to_cpu(pkt->edif_sa_index)); +} + +static void qla_parse_auth_els_ctl(struct srb *sp) +{ + struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg; + struct bsg_job *bsg_job = sp->u.bsg_cmd.bsg_job; + struct fc_bsg_request *request = bsg_job->request; + struct qla_bsg_auth_els_request *p = + (struct qla_bsg_auth_els_request *)bsg_job->request; + + a->tx_len = a->tx_byte_count = sp->remap.req.len; + a->tx_addr = sp->remap.req.dma; + a->rx_len = a->rx_byte_count = sp->remap.rsp.len; + a->rx_addr = sp->remap.rsp.dma; + + if (p->e.sub_cmd == SEND_ELS_REPLY) { + a->control_flags = p->e.extra_control_flags << 13; + a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address); + if
(p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC) + a->els_opcode = ELS_LS_ACC; + else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT) + a->els_opcode = ELS_LS_RJT; + } + a->did = sp->fcport->d_id; + a->els_opcode = request->rqst_data.h_els.command_code; + a->nport_handle = cpu_to_le16(sp->fcport->loop_id); + a->vp_idx = sp->vha->vp_idx; +} + +int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + fc_port_t *fcport = NULL; + struct qla_hw_data *ha = vha->hw; + srb_t *sp; + int rval = (DID_ERROR << 16); + port_id_t d_id; + struct qla_bsg_auth_els_request *p = + (struct qla_bsg_auth_els_request *)bsg_job->request; + + d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2]; + d_id.b.area = bsg_request->rqst_data.h_els.port_id[1]; + d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0]; + + /* find matching d_id in fcport list */ + fcport = qla2x00_find_fcport_by_pid(vha, &d_id); + if (!fcport) { + ql_dbg(ql_dbg_edif, vha, 0x911a, + "%s fcport not find online portid=%06x.\n", + __func__, d_id.b24); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + return -EIO; + } + + if (qla_bsg_check(vha, bsg_job, fcport)) + return 0; + + if (fcport->loop_id == FC_NO_LOOP_ID) { + ql_dbg(ql_dbg_edif, vha, 0x910d, + "%s ELS code %x, no loop id.\n", __func__, + bsg_request->rqst_data.r_els.els_code); + SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); + return -ENXIO; + } + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n"); + SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); + rval = -EIO; + goto done; + } + + /* pass through is supported only for ISP 4Gb or higher */ + if (!IS_FWI2_CAPABLE(ha)) { + ql_dbg(ql_dbg_user, vha, 0x7001, + "ELS passthru not supported for ISP23xx based adapters.\n"); + SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET); + rval = -EPERM; + goto done; + } + + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_dbg(ql_dbg_user, vha, 0x7004, + "Failed get sp pid=%06x\n", fcport->d_id.b24); + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done; + } + + sp->remap.req.len = bsg_job->request_payload.payload_len; + sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool, + GFP_KERNEL, &sp->remap.req.dma); + if (!sp->remap.req.buf) { + ql_dbg(ql_dbg_user, vha, 0x7005, + "Failed allocate request dma len=%x\n", + bsg_job->request_payload.payload_len); + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done_free_sp; + } + + sp->remap.rsp.len = bsg_job->reply_payload.payload_len; + sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool, + GFP_KERNEL, &sp->remap.rsp.dma); + if (!sp->remap.rsp.buf) { + ql_dbg(ql_dbg_user, vha, 0x7006, + "Failed allocate response dma len=%x\n", + bsg_job->reply_payload.payload_len); + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done_free_remap_req; + } + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, sp->remap.req.buf, + sp->remap.req.len); + sp->remap.remapped = true; + + sp->type = SRB_ELS_CMD_HST_NOLOGIN; + sp->name = "SPCN_BSG_HST_NOLOGIN"; + sp->u.bsg_cmd.bsg_job = bsg_job; + qla_parse_auth_els_ctl(sp); + + sp->free = qla2x00_bsg_sp_free; + sp->done = qla2x00_bsg_job_done; + + rval = qla2x00_start_sp(sp); + + ql_dbg(ql_dbg_edif, vha, 0x700a, + "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %p\n", + __func__, sc_to_str(p->e.sub_cmd), fcport->port_name, 
+ p->e.extra_rx_xchg_address, p->e.extra_control_flags, + sp->handle, sp->remap.req.len, bsg_job); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x700e, + "qla2x00_start_sp failed = %d\n", rval); + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + rval = -EIO; + goto done_free_remap_rsp; + } + return rval; + +done_free_remap_rsp: + dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf, + sp->remap.rsp.dma); +done_free_remap_req: + dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf, + sp->remap.req.dma); +done_free_sp: + qla2x00_rel_sp(sp); + +done: + return rval; +} + +void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess) +{ + if (sess->edif.app_sess_online && vha->e_dbell.db_flags & EDB_ACTIVE) { + ql_dbg(ql_dbg_disc, vha, 0xf09c, + "%s: sess %8phN send port_offline event\n", + __func__, sess->port_name); + sess->edif.app_sess_online = 0; + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN, + sess->d_id.b24, 0, sess); + qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24); + } +} diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h new file mode 100644 index 000000000000..1cff02e5bd43 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_edif.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell Fibre Channel HBA Driver + * Copyright (c) 2021 Marvell + */ +#ifndef __QLA_EDIF_H +#define __QLA_EDIF_H + +struct qla_scsi_host; +#define EDIF_APP_ID 0x73730001 + +#define EDIF_MAX_INDEX 2048 +struct edif_sa_ctl { + struct list_head next; + uint16_t del_index; + uint16_t index; + uint16_t slot; + uint16_t flags; +#define EDIF_SA_CTL_FLG_REPL BIT_0 +#define EDIF_SA_CTL_FLG_DEL BIT_1 +#define EDIF_SA_CTL_FLG_CLEANUP_DEL BIT_4 + // Invalidate Index bit and mirrors QLA_SA_UPDATE_FLAGS_DELETE + unsigned long state; +#define EDIF_SA_CTL_USED 1 /* Active Sa update */ +#define EDIF_SA_CTL_PEND 2 /* Waiting for slot */ +#define EDIF_SA_CTL_REPL 3 /* Active Replace and Delete */ +#define EDIF_SA_CTL_DEL 4 /* Delete Pending */ + struct fc_port *fcport; + struct bsg_job *bsg_job; + struct qla_sa_update_frame sa_frame; +}; + +enum enode_flags_t { + ENODE_ACTIVE = 0x1, +}; + +struct pur_core { + enum enode_flags_t enode_flags; + spinlock_t pur_lock; + struct list_head head; +}; + +enum db_flags_t { + EDB_ACTIVE = 0x1, +}; + +struct edif_dbell { + enum db_flags_t db_flags; + spinlock_t db_lock; + struct list_head head; + struct completion dbell; +}; + +#define SA_UPDATE_IOCB_TYPE 0x71 /* Security Association Update IOCB entry */ +struct sa_update_28xx { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System Defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* IOCB System handle. */ + + union { + __le16 nport_handle; /* in: N_PORT handle. 
*/ + __le16 comp_sts; /* out: completion status */ +#define CS_PORT_EDIF_SUPP_NOT_RDY 0x64 +#define CS_PORT_EDIF_INV_REQ 0x66 + } u; + uint8_t vp_index; + uint8_t reserved_1; + uint8_t port_id[3]; + uint8_t flags; +#define SA_FLAG_INVALIDATE BIT_0 +#define SA_FLAG_TX BIT_1 // 1=tx, 0=rx + + uint8_t sa_key[32]; /* 256 bit key */ + __le32 salt; + __le32 spi; + uint8_t sa_control; +#define SA_CNTL_ENC_FCSP (1 << 3) +#define SA_CNTL_ENC_OPD (2 << 3) +#define SA_CNTL_ENC_MSK (3 << 3) // mask bits 4,3 +#define SA_CNTL_AES_GMAC (1 << 2) +#define SA_CNTL_KEY256 (2 << 0) +#define SA_CNTL_KEY128 0 + + uint8_t reserved_2; + __le16 sa_index; // reserve: bit 11-15 + __le16 old_sa_info; + __le16 new_sa_info; +}; + +#define NUM_ENTRIES 256 +#define MAX_PAYLOAD 1024 +#define PUR_GET 1 + +struct dinfo { + int nodecnt; + int lstate; +}; + +struct pur_ninfo { + unsigned int pur_pend:1; + port_id_t pur_sid; + port_id_t pur_did; + uint8_t vp_idx; + short pur_bytes_rcvd; + unsigned short pur_nphdl; + unsigned int pur_rx_xchg_address; +}; + +struct purexevent { + struct pur_ninfo pur_info; + unsigned char *msgp; + u32 msgp_len; +}; + +#define N_UNDEF 0 +#define N_PUREX 1 +struct enode { + struct list_head list; + struct dinfo dinfo; + uint32_t ntype; + union { + struct purexevent purexinfo; + } u; +}; +#endif /* __QLA_EDIF_H */ diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h new file mode 100644 index 000000000000..58b718d35d19 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h @@ -0,0 +1,220 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Marvell Fibre Channel HBA Driver + * Copyright (C) 2018- Marvell + * + */ +#ifndef __QLA_EDIF_BSG_H +#define __QLA_EDIF_BSG_H + +/* BSG Vendor specific commands */ +#define ELS_MAX_PAYLOAD 1024 +#ifndef WWN_SIZE +#define WWN_SIZE 8 +#endif +#define VND_CMD_APP_RESERVED_SIZE 32 + +enum auth_els_sub_cmd { + SEND_ELS = 0, + SEND_ELS_REPLY, + PULL_ELS, +}; + +struct extra_auth_els { + enum auth_els_sub_cmd sub_cmd; + uint32_t extra_rx_xchg_address; + uint8_t extra_control_flags; +#define BSG_CTL_FLAG_INIT 0 +#define BSG_CTL_FLAG_LS_ACC 1 +#define BSG_CTL_FLAG_LS_RJT 2 +#define BSG_CTL_FLAG_TRM 3 + uint8_t extra_rsvd[3]; +} __packed; + +struct qla_bsg_auth_els_request { + struct fc_bsg_request r; + struct extra_auth_els e; +}; + +struct qla_bsg_auth_els_reply { + struct fc_bsg_reply r; + uint32_t rx_xchg_address; +}; + +struct app_id { + int app_vid; + uint8_t app_key[32]; +} __packed; + +struct app_start_reply { + uint32_t host_support_edif; + uint32_t edif_enode_active; + uint32_t edif_edb_active; + uint32_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_start { + struct app_id app_info; + uint32_t prli_to; + uint32_t key_shred; + uint8_t app_start_flags; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE - 1]; +} __packed; + +struct app_stop { + struct app_id app_info; + char buf[16]; +} __packed; + +struct app_plogi_reply { + uint32_t prli_status; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +#define RECFG_TIME 1 +#define RECFG_BYTES 2 + +struct app_rekey_cfg { + struct app_id app_info; + uint8_t rekey_mode; + port_id_t d_id; + uint8_t force; + union { + int64_t bytes; + int64_t time; + } rky_units; + + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_pinfo_req { + struct app_id app_info; + uint8_t num_ports; + port_id_t remote_pid; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_pinfo { + port_id_t remote_pid; + uint8_t remote_wwpn[WWN_SIZE]; + 
uint8_t remote_type; +#define VND_CMD_RTYPE_UNKNOWN 0 +#define VND_CMD_RTYPE_TARGET 1 +#define VND_CMD_RTYPE_INITIATOR 2 + uint8_t remote_state; + uint8_t auth_state; + uint8_t rekey_mode; + int64_t rekey_count; + int64_t rekey_config_value; + int64_t rekey_consumed_value; + + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +/* AUTH States */ +#define VND_CMD_AUTH_STATE_UNDEF 0 +#define VND_CMD_AUTH_STATE_SESSION_SHUTDOWN 1 +#define VND_CMD_AUTH_STATE_NEEDED 2 +#define VND_CMD_AUTH_STATE_ELS_RCVD 3 +#define VND_CMD_AUTH_STATE_SAUPDATE_COMPL 4 + +struct app_pinfo_reply { + uint8_t port_count; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; + struct app_pinfo ports[0]; +} __packed; + +struct app_sinfo_req { + struct app_id app_info; + uint8_t num_ports; + uint8_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +struct app_sinfo { + uint8_t remote_wwpn[WWN_SIZE]; + int64_t rekey_count; + uint8_t rekey_mode; + int64_t tx_bytes; + int64_t rx_bytes; +} __packed; + +struct app_stats_reply { + uint8_t elem_count; + struct app_sinfo elem[0]; +} __packed; + +struct qla_sa_update_frame { + struct app_id app_info; + uint16_t flags; +#define SAU_FLG_INV 0x01 /* delete key */ +#define SAU_FLG_TX 0x02 /* 1=tx, 0 = rx */ +#define SAU_FLG_FORCE_DELETE 0x08 +#define SAU_FLG_GMAC_MODE 0x20 /* + * GMAC mode is cleartext for the IO + * (i.e. NULL encryption) + */ +#define SAU_FLG_KEY128 0x40 +#define SAU_FLG_KEY256 0x80 + uint16_t fast_sa_index:10, + reserved:6; + uint32_t salt; + uint32_t spi; + uint8_t sa_key[32]; + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + port_id_t port_id; +} __packed; + +// used for edif mgmt bsg interface +#define QL_VND_SC_UNDEF 0 +#define QL_VND_SC_SA_UPDATE 1 +#define QL_VND_SC_APP_START 2 +#define QL_VND_SC_APP_STOP 3 +#define QL_VND_SC_AUTH_OK 4 +#define QL_VND_SC_AUTH_FAIL 5 +#define QL_VND_SC_REKEY_CONFIG 6 +#define QL_VND_SC_GET_FCINFO 7 +#define QL_VND_SC_GET_STATS 8 + +/* Application interface data structure for rtn data */ +#define EXT_DEF_EVENT_DATA_SIZE 64 +struct edif_app_dbell { + uint32_t event_code; + uint32_t event_data_size; + union { + port_id_t port_id; + uint8_t event_data[EXT_DEF_EVENT_DATA_SIZE]; + }; +} __packed; + +struct edif_sa_update_aen { + port_id_t port_id; + uint32_t key_type; /* Tx (1) or RX (2) */ + uint32_t status; /* 0 succes, 1 failed, 2 timeout , 3 error */ + uint8_t reserved[16]; +} __packed; + +#define QL_VND_SA_STAT_SUCCESS 0 +#define QL_VND_SA_STAT_FAILED 1 +#define QL_VND_SA_STAT_TIMEOUT 2 +#define QL_VND_SA_STAT_ERROR 3 + +#define QL_VND_RX_SA_KEY 1 +#define QL_VND_TX_SA_KEY 2 + +/* App defines for plogi auth'd ok and plogi auth bad requests */ +struct auth_complete_cmd { + struct app_id app_info; +#define PL_TYPE_WWPN 1 +#define PL_TYPE_DID 2 + uint32_t type; + union { + uint8_t wwpn[WWN_SIZE]; + port_id_t d_id; + } u; + uint32_t reserved[VND_CMD_APP_RESERVED_SIZE]; +} __packed; + +#define RX_DELAY_DELETE_TIMEOUT 20 + +#endif /* QLA_EDIF_BSG_H */ diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 49df418030e4..c257af8d87fd 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -82,10 +82,11 @@ struct port_database_24xx { uint8_t port_name[WWN_SIZE]; uint8_t node_name[WWN_SIZE]; - uint8_t reserved_3[4]; + uint8_t reserved_3[2]; + uint16_t nvme_first_burst_size; uint16_t prli_nvme_svc_param_word_0; /* Bits 15-0 of word 0 */ uint16_t prli_nvme_svc_param_word_3; /* Bits 15-0 of word 3 */ - uint16_t nvme_first_burst_size; + uint8_t secure_login; uint8_t 
reserved_4[14]; }; @@ -489,6 +490,9 @@ struct cmd_type_6 { struct scsi_lun lun; /* FCP LUN (BE). */ __le16 control_flags; /* Control flags. */ +#define CF_NEW_SA BIT_12 +#define CF_EN_EDIF BIT_9 +#define CF_ADDITIONAL_PARAM_BLK BIT_8 #define CF_DIF_SEG_DESCR_ENABLE BIT_3 #define CF_DATA_SEG_DESCR_ENABLE BIT_2 #define CF_READ_DATA BIT_1 @@ -611,6 +615,7 @@ struct sts_entry_24xx { union { __le16 reserved_1; __le16 nvme_rsp_pyld_len; + __le16 edif_sa_index; /* edif sa_index used for initiator read data */ }; __le16 state_flags; /* State flags. */ @@ -896,6 +901,7 @@ struct logio_entry_24xx { #define LCF_FCP2_OVERRIDE BIT_9 /* Set/Reset word 3 of PRLI. */ #define LCF_CLASS_2 BIT_8 /* Enable class 2 during PLOGI. */ #define LCF_FREE_NPORT BIT_7 /* Release NPORT handle after LOGO. */ +#define LCF_COMMON_FEAT BIT_7 /* PLOGI - Set Common Features Field */ #define LCF_EXPL_LOGO BIT_6 /* Perform an explicit LOGO. */ #define LCF_NVME_PRLI BIT_6 /* Perform NVME FC4 PRLI */ #define LCF_SKIP_PRLI BIT_5 /* Skip PRLI after PLOGI. */ @@ -920,6 +926,8 @@ struct logio_entry_24xx { uint8_t rsp_size; /* Response size in 32bit words. */ __le32 io_parameter[11]; /* General I/O parameters. */ +#define LIO_COMM_FEAT_FCSP BIT_21 +#define LIO_COMM_FEAT_CIO BIT_31 #define LSC_SCODE_NOLINK 0x01 #define LSC_SCODE_NOIOCB 0x02 #define LSC_SCODE_NOXCB 0x03 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 2f867da822ae..2b8bdb146a8f 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -12,6 +12,7 @@ * Global Function Prototypes in qla_init.c source file. */ extern int qla2x00_initialize_adapter(scsi_qla_host_t *); +extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport); extern int qla2100_pci_config(struct scsi_qla_host *); extern int qla2300_pci_config(struct scsi_qla_host *); @@ -130,6 +131,18 @@ void qla24xx_free_purex_item(struct purex_item *item); extern bool qla24xx_risc_firmware_invalid(uint32_t *); void qla_init_iocb_limit(scsi_qla_host_t *); +void qla_edif_list_del(fc_port_t *fcport); +void qla_edif_sadb_release(struct qla_hw_data *ha); +int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha); +void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha); +void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, + srb_t *sp, struct sts_entry_24xx *sts24); +void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport, + struct ctio7_from_24xx *ctio); +void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport); +int qla_edif_process_els(scsi_qla_host_t *vha, struct bsg_job *bsgjob); +void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess); +const char *sc_to_str(uint16_t cmd); /* * Global Data in qla_os.c source file. @@ -175,6 +188,7 @@ extern int ql2xenablemsix; extern int qla2xuseresexchforels; extern int ql2xdifbundlinginternalbuffers; extern int ql2xfulldump_on_mpifail; +extern int ql2xsecenable; extern int ql2xenforce_iocb_limit; extern int ql2xabts_wait_nvme; @@ -236,6 +250,8 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, struct purex_item *pkt); void qla_pci_set_eeh_busy(struct scsi_qla_host *); void qla_schedule_eeh_work(struct scsi_qla_host *); +struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, + int index, int dir); /* * Global Functions in qla_mid.c source file. @@ -280,7 +296,10 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *); /* * Global Function Prototypes in qla_iocb.c source file. 
*/ - +void qla_els_pt_iocb(struct scsi_qla_host *vha, + struct els_entry_24xx *pkt, struct qla_els_pt_arg *a); +cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, + struct req_que *que); extern uint16_t qla2x00_calc_iocbs_32(uint16_t); extern uint16_t qla2x00_calc_iocbs_64(uint16_t); extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); @@ -310,6 +329,8 @@ extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, struct dsd64 *, uint16_t, struct qla_tgt_cmd *); extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); +extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, + struct qla_work_evt *e); /* * Global Function Prototypes in qla_mbx.c source file. @@ -578,6 +599,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id); fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t); fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8); fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8); +void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp); /* * Global Function Prototypes in qla_sup.c source file. @@ -640,6 +662,8 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *); extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *); +int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt, + struct rsp_que **rsp, u8 *buf, u32 buf_len); /* * Global Function Prototypes in qla_dbg.c source file. @@ -879,6 +903,9 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t, uint32_t); extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t *, uint16_t *); +extern int qla24xx_sadb_update(struct bsg_job *bsg_job); +extern int qla_post_sa_replace_work(struct scsi_qla_host *vha, + fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl); /* 83xx related functions */ void qla83xx_fw_dump(scsi_qla_host_t *vha); @@ -923,6 +950,7 @@ extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *); extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *, response_t *); +struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, be_id_t d_id); int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, struct imm_ntfy_from_isp *, int); void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *); @@ -935,7 +963,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, void qla24xx_delete_sess_fn(struct work_struct *); void qlt_unknown_atio_work_fn(struct work_struct *); void qlt_update_host_map(struct scsi_qla_host *, port_id_t); -void qlt_remove_target_resources(struct qla_hw_data *); +void qla_remove_hostmap(struct qla_hw_data *ha); void qlt_clr_qp_table(struct scsi_qla_host *vha); void qlt_set_mode(struct scsi_qla_host *); int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode); @@ -950,6 +978,24 @@ extern void qla_nvme_abort_process_comp_status /* nvme.c */ void qla_nvme_unregister_remote_port(struct fc_port *fcport); + +/* qla_edif.c */ +fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id); +void qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, uint32_t data, uint32_t data2, + fc_port_t *fcport); +void qla_edb_stop(scsi_qla_host_t *vha); +ssize_t edif_doorbell_show(struct device *dev, struct device_attribute *attr, char *buf); +int32_t 
qla_edif_app_mgmt(struct bsg_job *bsg_job); +void qla_enode_init(scsi_qla_host_t *vha); +void qla_enode_stop(scsi_qla_host_t *vha); +void qla_edif_flush_sa_ctl_lists(fc_port_t *fcport); +void qla_edb_init(scsi_qla_host_t *vha); +int qla28xx_start_scsi_edif(srb_t *sp); +void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb); +void qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb); +void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp); +void qla28xx_sa_update_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct sa_update_28xx *pkt); void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea); #define QLA2XX_HW_ERROR BIT_0 diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 5b6e04a91a18..b16b7d16be12 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -632,7 +632,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id); ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ - if (vha->flags.nvme_enabled) + if (vha->flags.nvme_enabled && qla_ini_mode_enabled(vha)) ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */ sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE; @@ -2826,6 +2826,10 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) if (fcport->disc_state == DSC_DELETE_PEND) return; + /* We will figure-out what happen after AUTH completes */ + if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) + return; + if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. */ ql_dbg(ql_dbg_disc, vha, 0x20d3, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f8f471157109..ad0d3f536a31 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -34,7 +34,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *); static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); -static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *); static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea); static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, @@ -343,19 +342,28 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); sp->done = qla2x00_async_login_sp_done; - if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) + if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; - else - lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; + } else { + if (vha->hw->flags.edif_enabled && + vha->e_dbell.db_flags & EDB_ACTIVE) { + lio->u.logio.flags |= + (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI); + ql_dbg(ql_dbg_disc, vha, 0x2072, + "Async-login: w/ FCSP %8phC hdl=%x, loopid=%x portid=%06x\n", + fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24); + } else { + lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; + } + } if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; ql_log(ql_log_warn, vha, 0x2072, - "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x retries=%d.\n", + "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, - fcport->login_retry); + 
fcport->d_id.b24, fcport->login_retry); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -378,7 +386,7 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res) { sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); sp->fcport->login_gen++; - qlt_logo_completion_handler(sp->fcport, res); + qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]); sp->free(sp); } @@ -404,10 +412,10 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) sp->done = qla2x00_async_logout_sp_done; ql_dbg(ql_dbg_disc, vha, 0x2070, - "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", + "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, - fcport->port_name); + fcport->port_name, fcport->explicit_logout); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) @@ -692,11 +700,11 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n", + "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, ea->rc, fcport->login_gen, fcport->last_login_gen, - fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id); + fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable); if (fcport->disc_state == DSC_DELETE_PEND) return; @@ -822,6 +830,13 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, qla2x00_post_async_adisc_work(vha, fcport, data); break; + case DSC_LS_PLOGI_COMP: + if (vha->hw->flags.edif_enabled) { + /* check to see if App support Secure */ + qla24xx_post_gpdb_work(vha, fcport, 0); + break; + } + fallthrough; case DSC_LS_PORT_UNAVAIL: default: if (fcport->loop_id == FC_NO_LOOP_ID) { @@ -1191,7 +1206,7 @@ done: sp->free(sp); } -static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) +int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; @@ -1418,6 +1433,60 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } +static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport, + struct port_database_24xx *pd) +{ + int rc = 0; + + if (pd->secure_login) { + ql_dbg(ql_dbg_disc, vha, 0x104d, + "Secure Login established on %8phC\n", + fcport->port_name); + fcport->edif.secured_login = 1; + fcport->edif.non_secured_login = 0; + fcport->flags |= FCF_FCSP_DEVICE; + } else { + ql_dbg(ql_dbg_disc, vha, 0x104d, + "non-Secure Login %8phC", + fcport->port_name); + fcport->edif.secured_login = 0; + fcport->edif.non_secured_login = 1; + } + if (vha->hw->flags.edif_enabled) { + if (fcport->flags & FCF_FCSP_DEVICE) { + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND); + /* Start edif prli timer & ring doorbell for app */ + fcport->edif.rx_sa_set = 0; + fcport->edif.tx_sa_set = 0; + fcport->edif.rx_sa_pending = 0; + fcport->edif.tx_sa_pending = 0; + + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, + fcport->d_id.b24); + + if (vha->e_dbell.db_flags == EDB_ACTIVE) { + ql_dbg(ql_dbg_disc, vha, 0x20ef, + "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n", + __func__, __LINE__, fcport->port_name); + fcport->edif.app_started = 1; + fcport->edif.app_sess_online = 1; + + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, + fcport->d_id.b24, 0, fcport); + } + 
+ rc = 1; + } else { + ql_dbg(ql_dbg_disc, vha, 0x2117, + "%s %d %8phC post prli\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_prli_work(vha, fcport); + rc = 1; + } + } + return rc; +} + static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) { @@ -1460,8 +1529,11 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) case PDS_PRLI_COMPLETE: __qla24xx_parse_gpdb(vha, fcport, pd); break; - case PDS_PLOGI_PENDING: case PDS_PLOGI_COMPLETE: + if (qla_chk_secure_login(vha, fcport, pd)) + return; + fallthrough; + case PDS_PLOGI_PENDING: case PDS_PRLI_PENDING: case PDS_PRLI2_PENDING: /* Set discovery state back to GNL to Relogin attempt */ @@ -2053,26 +2125,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. */ - if (NVME_TARGET(vha->hw, ea->fcport)) { - ql_dbg(ql_dbg_disc, vha, 0x2117, - "%s %d %8phC post prli\n", - __func__, __LINE__, ea->fcport->port_name); - qla24xx_post_prli_work(vha, ea->fcport); - } else { - ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n", - __func__, __LINE__, ea->fcport->port_name, - ea->fcport->loop_id, ea->fcport->d_id.b24); - + if (vha->hw->flags.edif_enabled) { set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; ea->fcport->send_els_logo = 0; - ea->fcport->fw_login_state = DSC_LS_PRLI_COMP; + ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qla24xx_post_gpdb_work(vha, ea->fcport, 0); + } else { + if (NVME_TARGET(vha->hw, fcport)) { + ql_dbg(ql_dbg_disc, vha, 0x2117, + "%s %d %8phC post prli\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_prli_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ea, + "%s %d %8phC LoopID 0x%x in use with %06x. 
post gpdb\n", + __func__, __LINE__, fcport->port_name, + fcport->loop_id, fcport->d_id.b24); + + set_bit(fcport->loop_id, vha->hw->loop_id_map); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->chip_reset = vha->hw->base_qpair->chip_reset; + fcport->logout_on_delete = 1; + fcport->send_els_logo = 0; + fcport->fw_login_state = DSC_LS_PRLI_COMP; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + qla24xx_post_gpdb_work(vha, fcport, 0); + } } break; case MBS_COMMAND_ERROR: @@ -3877,7 +3961,8 @@ enable_82xx_npiv: } /* Enable PUREX PASSTHRU */ - if (ql2xrdpenable || ha->flags.scm_supported_f) + if (ql2xrdpenable || ha->flags.scm_supported_f || + ha->flags.edif_enabled) qla25xx_set_els_cmds_supported(vha); } else goto failed; @@ -4062,7 +4147,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) } /* Move PUREX, ABTS RX & RIDA to ATIOQ */ - if (ql2xmvasynctoatio && + if (ql2xmvasynctoatio && !ha->flags.edif_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) @@ -4090,7 +4175,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) ha->fw_options[2] &= ~BIT_8; } - if (ql2xrdpenable || ha->flags.scm_supported_f) + if (ql2xrdpenable || ha->flags.scm_supported_f || + ha->flags.edif_enabled) ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB; /* Enable Async 8130/8131 events -- transceiver insertion/removal */ @@ -5071,6 +5157,16 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) INIT_LIST_HEAD(&fcport->sess_cmd_list); spin_lock_init(&fcport->sess_cmd_lock); + spin_lock_init(&fcport->edif.sa_list_lock); + INIT_LIST_HEAD(&fcport->edif.tx_sa_list); + INIT_LIST_HEAD(&fcport->edif.rx_sa_list); + + if (vha->e_dbell.db_flags == EDB_ACTIVE) + fcport->edif.app_started = 1; + + spin_lock_init(&fcport->edif.indx_list_lock); + INIT_LIST_HEAD(&fcport->edif.edif_indx_list); + return fcport; } @@ -5084,8 +5180,13 @@ qla2x00_free_fcport(fc_port_t *fcport) fcport->ct_desc.ct_sns = NULL; } + + qla_edif_flush_sa_ctl_lists(fcport); list_del(&fcport->list); qla2x00_clear_loop_id(fcport); + + qla_edif_list_del(fcport); + kfree(fcport); } @@ -5204,6 +5305,12 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) "LOOP READY.\n"); ha->flags.fw_init_done = 1; + if (vha->hw->flags.edif_enabled && + vha->e_dbell.db_flags != EDB_ACTIVE) { + /* wake up authentication app to get ready */ + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, 0); + } + /* * Process any ATIO queue entries that came in * while we weren't online. @@ -5223,7 +5330,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) "%s *** FAILED ***.\n", __func__); } else { ql_dbg(ql_dbg_disc, vha, 0x206b, - "%s: exiting normally.\n", __func__); + "%s: exiting normally. local port wwpn %8phN id %06x)\n", + __func__, vha->port_name, vha->d_id.b24); } /* Restore state if a resync event occurred during processing */ diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index d0ee843f6b04..625d6b237fb2 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -118,7 +118,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) * * Returns a pointer to the continuation type 1 IOCB packet. 
*/ -static inline cont_a64_entry_t * +cont_a64_entry_t * qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) { cont_a64_entry_t *cont_pkt; @@ -145,7 +145,6 @@ inline int qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) { struct scsi_cmnd *cmd = GET_CMD_SP(sp); - uint8_t guard = scsi_host_get_guard(cmd->device->host); /* We always use DIFF Bundling for best performance */ *fw_prot_opts = 0; @@ -166,7 +165,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - if (guard & SHOST_DIX_GUARD_IP) + if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM) *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM; else *fw_prot_opts |= PO_MODE_DIF_PASS; @@ -176,6 +175,9 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) break; } + if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK)) + *fw_prot_opts |= PO_DISABLE_GUARD_CHECK; + return scsi_prot_sg_count(cmd); } @@ -772,74 +774,19 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, { struct scsi_cmnd *cmd = GET_CMD_SP(sp); - switch (scsi_get_prot_type(cmd)) { - case SCSI_PROT_DIF_TYPE0: - /* - * No check for ql2xenablehba_err_chk, as it would be an - * I/O error if hba tag generation is not done. - */ - pkt->ref_tag = cpu_to_le32((uint32_t) - (0xffffffff & scsi_get_lba(cmd))); - - if (!qla2x00_hba_err_chk_enabled(sp)) - break; - - pkt->ref_tag_mask[0] = 0xff; - pkt->ref_tag_mask[1] = 0xff; - pkt->ref_tag_mask[2] = 0xff; - pkt->ref_tag_mask[3] = 0xff; - break; - - /* - * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to - * match LBA in CDB + N - */ - case SCSI_PROT_DIF_TYPE2: - pkt->app_tag = cpu_to_le16(0); - pkt->app_tag_mask[0] = 0x0; - pkt->app_tag_mask[1] = 0x0; - - pkt->ref_tag = cpu_to_le32((uint32_t) - (0xffffffff & scsi_get_lba(cmd))); - - if (!qla2x00_hba_err_chk_enabled(sp)) - break; - - /* enable ALL bytes of the ref tag */ - pkt->ref_tag_mask[0] = 0xff; - pkt->ref_tag_mask[1] = 0xff; - pkt->ref_tag_mask[2] = 0xff; - pkt->ref_tag_mask[3] = 0xff; - break; - - /* For Type 3 protection: 16 bit GUARD only */ - case SCSI_PROT_DIF_TYPE3: - pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] = - pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] = - 0x00; - break; - - /* - * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and - * 16 bit app tag. 
- */ - case SCSI_PROT_DIF_TYPE1: - pkt->ref_tag = cpu_to_le32((uint32_t) - (0xffffffff & scsi_get_lba(cmd))); - pkt->app_tag = cpu_to_le16(0); - pkt->app_tag_mask[0] = 0x0; - pkt->app_tag_mask[1] = 0x0; - - if (!qla2x00_hba_err_chk_enabled(sp)) - break; + pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd)); - /* enable ALL bytes of the ref tag */ + if (cmd->prot_flags & SCSI_PROT_REF_CHECK && + qla2x00_hba_err_chk_enabled(sp)) { pkt->ref_tag_mask[0] = 0xff; pkt->ref_tag_mask[1] = 0xff; pkt->ref_tag_mask[2] = 0xff; pkt->ref_tag_mask[3] = 0xff; - break; } + + pkt->app_tag = cpu_to_le16(0); + pkt->app_tag_mask[0] = 0x0; + pkt->app_tag_mask[1] = 0x0; } int @@ -905,7 +852,7 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, memset(&sgx, 0, sizeof(struct qla2_sgx)); if (sp) { cmd = GET_CMD_SP(sp); - prot_int = cmd->device->sector_size; + prot_int = scsi_prot_interval(cmd); sgx.tot_bytes = scsi_bufflen(cmd); sgx.cur_sg = scsi_sglist(cmd); @@ -1963,6 +1910,9 @@ qla2xxx_start_scsi_mq(srb_t *sp) struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = sp->qpair; + if (sp->fcport->edif.enable) + return qla28xx_start_scsi_edif(sp); + /* Acquire qpair specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); @@ -2466,6 +2416,12 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); + if (lio->u.logio.flags & SRB_LOGIN_FCSP) { + logio->control_flags |= + cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI); + logio->io_parameter[0] = + cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO); + } } logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; @@ -2806,7 +2762,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) (uint8_t *)els_iocb, sizeof(*els_iocb)); } else { - els_iocb->control_flags = cpu_to_le16(1 << 13); els_iocb->tx_byte_count = cpu_to_le32(sizeof(struct els_logo_payload)); put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, @@ -3106,6 +3061,43 @@ done: return rval; } +/* it is assume qpair lock is held */ +void qla_els_pt_iocb(struct scsi_qla_host *vha, + struct els_entry_24xx *els_iocb, + struct qla_els_pt_arg *a) +{ + els_iocb->entry_type = ELS_IOCB_TYPE; + els_iocb->entry_count = 1; + els_iocb->sys_define = 0; + els_iocb->entry_status = 0; + els_iocb->handle = QLA_SKIP_HANDLE; + els_iocb->nport_handle = a->nport_handle; + els_iocb->rx_xchg_address = a->rx_xchg_address; + els_iocb->tx_dsd_count = cpu_to_le16(1); + els_iocb->vp_index = a->vp_idx; + els_iocb->sof_type = EST_SOFI3; + els_iocb->rx_dsd_count = cpu_to_le16(0); + els_iocb->opcode = a->els_opcode; + + els_iocb->d_id[0] = a->did.b.al_pa; + els_iocb->d_id[1] = a->did.b.area; + els_iocb->d_id[2] = a->did.b.domain; + /* For SID the byte order is different than DID */ + els_iocb->s_id[1] = vha->d_id.b.al_pa; + els_iocb->s_id[2] = vha->d_id.b.area; + els_iocb->s_id[0] = vha->d_id.b.domain; + + els_iocb->control_flags = cpu_to_le16(a->control_flags); + + els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count); + els_iocb->tx_len = cpu_to_le32(a->tx_len); + put_unaligned_le64(a->tx_addr, &els_iocb->tx_address); + + els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count); + els_iocb->rx_len = cpu_to_le32(a->rx_len); + put_unaligned_le64(a->rx_addr, &els_iocb->rx_address); +} + static void qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { @@ -3700,6 +3692,16 @@ static void 
qla2x00_send_notify_ack_iocb(srb_t *sp, nack->u.isp24.srr_reject_code = 0; nack->u.isp24.srr_reject_code_expl = 0; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + if (ntfy->u.isp24.status_subcode == ELS_PLOGI && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) && + sp->vha->hw->flags.edif_enabled) { + ql_dbg(ql_dbg_disc, sp->vha, 0x3074, + "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n", + sp->name, sp->handle, sp->fcport->loop_id, + sp->fcport->d_id.b24); + nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); + } } /* @@ -3804,6 +3806,10 @@ qla2x00_start_sp(srb_t *sp) case SRB_ELS_CMD_HST: qla24xx_els_iocb(sp, pkt); break; + case SRB_ELS_CMD_HST_NOLOGIN: + qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg); + ((struct els_entry_24xx *)pkt)->handle = sp->handle; + break; case SRB_CT_CMD: IS_FWI2_CAPABLE(ha) ? qla24xx_ct_iocb(sp, pkt) : @@ -3851,6 +3857,12 @@ qla2x00_start_sp(srb_t *sp) case SRB_PRLO_CMD: qla24xx_prlo_iocb(sp, pkt); break; + case SRB_SA_UPDATE: + qla24xx_sa_update_iocb(sp, pkt); + break; + case SRB_SA_REPLACE: + qla24xx_sa_replace_iocb(sp, pkt); + break; default: break; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d9fb093a60a1..e8928fd83049 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -170,6 +170,149 @@ qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) } /** + * __qla_consume_iocb - this routine is used to tell fw driver has processed + * or consumed the head IOCB along with the continuation IOCB's from the + * provided respond queue. + * @vha: host adapter pointer + * @pkt: pointer to current packet. On return, this pointer shall move + * to the next packet. + * @rsp: respond queue pointer. + * + * it is assumed pkt is the head iocb, not the continuation iocbk + */ +void __qla_consume_iocb(struct scsi_qla_host *vha, + void **pkt, struct rsp_que **rsp) +{ + struct rsp_que *rsp_q = *rsp; + response_t *new_pkt; + uint16_t entry_count_remaining; + struct purex_entry_24xx *purex = *pkt; + + entry_count_remaining = purex->entry_count; + while (entry_count_remaining > 0) { + new_pkt = rsp_q->ring_ptr; + *pkt = new_pkt; + + rsp_q->ring_index++; + if (rsp_q->ring_index == rsp_q->length) { + rsp_q->ring_index = 0; + rsp_q->ring_ptr = rsp_q->ring; + } else { + rsp_q->ring_ptr++; + } + + new_pkt->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + --entry_count_remaining; + } +} + +/** + * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB + * and save to provided buffer + * @vha: host adapter pointer + * @pkt: pointer Purex IOCB + * @rsp: respond queue + * @buf: extracted ELS payload copy here + * @buf_len: buffer length + */ +int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, + void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len) +{ + struct purex_entry_24xx *purex = *pkt; + struct rsp_que *rsp_q = *rsp; + sts_cont_entry_t *new_pkt; + uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; + uint16_t buffer_copy_offset = 0; + uint16_t entry_count_remaining; + u16 tpad; + + entry_count_remaining = purex->entry_count; + total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) + - PURX_ELS_HEADER_SIZE; + + /* + * end of payload may not end in 4bytes boundary. 
Need to + * round up / pad for room to swap, before saving data + */ + tpad = roundup(total_bytes, 4); + + if (buf_len < tpad) { + ql_dbg(ql_dbg_async, vha, 0x5084, + "%s buffer is too small %d < %d\n", + __func__, buf_len, tpad); + __qla_consume_iocb(vha, pkt, rsp); + return -EIO; + } + + pending_bytes = total_bytes = tpad; + no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? + sizeof(purex->els_frame_payload) : pending_bytes; + + memcpy(buf, &purex->els_frame_payload[0], no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + + ((response_t *)purex)->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + + do { + while ((total_bytes > 0) && (entry_count_remaining > 0)) { + new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; + *pkt = new_pkt; + + if (new_pkt->entry_type != STATUS_CONT_TYPE) { + ql_log(ql_log_warn, vha, 0x507a, + "Unexpected IOCB type, partial data 0x%x\n", + buffer_copy_offset); + break; + } + + rsp_q->ring_index++; + if (rsp_q->ring_index == rsp_q->length) { + rsp_q->ring_index = 0; + rsp_q->ring_ptr = rsp_q->ring; + } else { + rsp_q->ring_ptr++; + } + no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? + sizeof(new_pkt->data) : pending_bytes; + if ((buffer_copy_offset + no_bytes) <= total_bytes) { + memcpy((buf + buffer_copy_offset), new_pkt->data, + no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + } else { + ql_log(ql_log_warn, vha, 0x5044, + "Attempt to copy more that we got, optimizing..%x\n", + buffer_copy_offset); + memcpy((buf + buffer_copy_offset), new_pkt->data, + total_bytes - buffer_copy_offset); + } + + ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + } + + if (pending_bytes != 0 || entry_count_remaining != 0) { + ql_log(ql_log_fatal, vha, 0x508b, + "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n", + total_bytes, entry_count_remaining); + return -EIO; + } + } while (entry_count_remaining > 0); + + be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2); + + return 0; +} + +/** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 
* @irq: interrupt number * @dev_id: SCSI driver HA context @@ -1727,6 +1870,9 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, srb_t *sp; uint16_t index; + if (pkt->handle == QLA_SKIP_HANDLE) + return NULL; + index = LSW(pkt->handle); if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x5031, @@ -1971,7 +2117,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, } static void -qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, +qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req, struct sts_entry_24xx *pkt, int iocb_type) { struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; @@ -1982,18 +2128,58 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, struct fc_bsg_reply *bsg_reply; uint16_t comp_status; uint32_t fw_status[3]; - int res; + int res, logit = 1; struct srb_iocb *els; + uint n; + scsi_qla_host_t *vha; + struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt; - sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + sp = qla2x00_get_sp_from_handle(v, func, req, pkt); if (!sp) return; + bsg_job = sp->u.bsg_job; + vha = sp->vha; type = NULL; + + comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); + fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1); + fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2); + switch (sp->type) { case SRB_ELS_CMD_RPT: case SRB_ELS_CMD_HST: + type = "rpt hst"; + break; + case SRB_ELS_CMD_HST_NOLOGIN: type = "els"; + { + struct els_entry_24xx *els = (void *)pkt; + struct qla_bsg_auth_els_request *p = + (struct qla_bsg_auth_els_request *)bsg_job->request; + + ql_dbg(ql_dbg_user, vha, 0x700f, + "%s %s. portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n", + __func__, sc_to_str(p->e.sub_cmd), + e->d_id[2], e->d_id[1], e->d_id[0], + comp_status, p->e.extra_rx_xchg_address, bsg_job); + + if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) { + if (sp->remap.remapped) { + n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + sp->remap.rsp.buf, + sp->remap.rsp.len); + ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e, + "%s: SG copied %x of %x\n", + __func__, n, sp->remap.rsp.len); + } else { + ql_dbg(ql_dbg_user, vha, 0x700f, + "%s: NOT REMAPPED (error)...!!!\n", + __func__); + } + } + } break; case SRB_CT_CMD: type = "ct pass-through"; @@ -2023,10 +2209,6 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, return; } - comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); - fw_status[1] = le32_to_cpu(ese->error_subcode_1); - fw_status[2] = le32_to_cpu(ese->error_subcode_2); - if (iocb_type == ELS_IOCB_TYPE) { els = &sp->u.iocb_cmd; els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); @@ -2040,15 +2222,52 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, res = DID_OK << 16; els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( ese->total_byte_count)); + + if (sp->remap.remapped && + ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) { + ql_dbg(ql_dbg_user, vha, 0x503f, + "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x", + __func__, e->s_id[0], e->s_id[2], e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + logit = 0; + } + + } else if (comp_status == CS_PORT_LOGGED_OUT) { + els->u.els_plogi.len = 0; + res = DID_IMM_RETRY << 16; } else { els->u.els_plogi.len = 0; res = DID_ERROR << 16; } + + if (logit) { + if (sp->remap.remapped && + ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) { + ql_dbg(ql_dbg_user, vha, 0x503f, + 
"%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n", + type, sp->handle, comp_status); + + ql_dbg(ql_dbg_user, vha, 0x503f, + "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", + fw_status[1], fw_status[2], + le32_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count), + e->s_id[0], e->s_id[2], e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + } else { + ql_log(ql_log_info, vha, 0x503f, + "%s IOCB Done hdl=%x comp_status=0x%x\n", + type, sp->handle, comp_status); + ql_log(ql_log_info, vha, 0x503f, + "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", + fw_status[1], fw_status[2], + le32_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count), + e->s_id[0], e->s_id[2], e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + } + } } - ql_dbg(ql_dbg_disc, vha, 0x503f, - "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", - type, sp->handle, comp_status, fw_status[1], fw_status[2], - le32_to_cpu(ese->total_byte_count)); goto els_ct_done; } @@ -2153,6 +2372,10 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, if (sp->type != SRB_LOGIN_CMD) goto logio_done; + lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]); + if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP) + fcport->flags |= FCF_FCSP_DEVICE; + iop[0] = le32_to_cpu(logio->io_parameter[0]); if (iop[0] & BIT_4) { fcport->port_type = FCT_TARGET; @@ -2969,9 +3192,10 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) return; } + /* Fast path completion. */ + qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24); sp->qpair->cmd_completion_cnt++; - /* Fast path completion. */ if (comp_status == CS_COMPLETE && scsi_status == 0) { qla2x00_process_completed_request(vha, req, handle); @@ -3366,6 +3590,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) } break; + case SA_UPDATE_IOCB_TYPE: case ABTS_RESP_24XX: case CTIO_TYPE7: case CTIO_CRC2: @@ -3453,6 +3678,63 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, } /** + * qla_chk_cont_iocb_avail - check for all continuation iocbs are available + * before iocb processing can start. + * @vha: host adapter pointer + * @rsp: respond queue + * @pkt: head iocb describing how many continuation iocb + * Return: 0 all iocbs has arrived, xx- all iocbs have not arrived. + */ +static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha, + struct rsp_que *rsp, response_t *pkt) +{ + int start_pkt_ring_index, end_pkt_ring_index, n_ring_index; + response_t *end_pkt; + int rc = 0; + u32 rsp_q_in; + + if (pkt->entry_count == 1) + return rc; + + /* ring_index was pre-increment. set it back to current pkt */ + if (rsp->ring_index == 0) + start_pkt_ring_index = rsp->length - 1; + else + start_pkt_ring_index = rsp->ring_index - 1; + + if ((start_pkt_ring_index + pkt->entry_count) >= rsp->length) + end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - + rsp->length - 1; + else + end_pkt_ring_index = start_pkt_ring_index + pkt->entry_count - 1; + + end_pkt = rsp->ring + end_pkt_ring_index; + + /* next pkt = end_pkt + 1 */ + n_ring_index = end_pkt_ring_index + 1; + if (n_ring_index >= rsp->length) + n_ring_index = 0; + + rsp_q_in = rsp->qpair->use_shadow_reg ? *rsp->in_ptr : + rd_reg_dword(rsp->rsp_q_in); + + /* rsp_q_in is either wrapped or pointing beyond endpkt */ + if ((rsp_q_in < start_pkt_ring_index && rsp_q_in < n_ring_index) || + rsp_q_in >= n_ring_index) + /* all IOCBs arrived. 
*/ + rc = 0; + else + rc = -EIO; + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x5091, + "%s - ring %p pkt %p end pkt %p entry count %#x rsp_q_in %d rc %d\n", + __func__, rsp->ring, pkt, end_pkt, pkt->entry_count, + rsp_q_in, rc); + + return rc; +} + +/** * qla24xx_process_response_queue() - Process response queue entries. * @vha: SCSI driver HA context * @rsp: response queue @@ -3592,12 +3874,26 @@ process_err: qla27xx_process_purex_fpin); break; + case ELS_AUTH_ELS: + if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt)) { + ql_dbg(ql_dbg_init, vha, 0x5091, + "Defer processing ELS opcode %#x...\n", + purex_entry->els_frame_payload[3]); + return; + } + qla24xx_auth_els(vha, (void **)&pkt, &rsp); + break; default: ql_log(ql_log_warn, vha, 0x509c, "Discarding ELS Request opcode 0x%x\n", purex_entry->els_frame_payload[3]); } break; + case SA_UPDATE_IOCB_TYPE: + qla28xx_sa_update_iocb_entry(vha, rsp->req, + (struct sa_update_28xx *)pkt); + break; + default: /* Type Not Supported. */ ql_dbg(ql_dbg_async, vha, 0x5042, diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 9f3ad8aa649c..4dd008e06617 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -739,7 +739,7 @@ again: mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE; mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; - mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; + mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1; } else { mcp->mb[1] = LSW(risc_addr); mcp->out_mb |= MBX_1; @@ -795,6 +795,12 @@ again: } } + if (IS_QLA28XX(ha) && (mcp->mb[5] & BIT_10) && ql2xsecenable) { + ha->flags.edif_enabled = 1; + ql_log(ql_log_info, vha, 0xffff, + "%s: edif is enabled\n", __func__); + } + done: ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, "Done %s.\n", __func__); @@ -4946,7 +4952,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, return rval; } -#define PUREX_CMD_COUNT 2 +#define PUREX_CMD_COUNT 4 int qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) { @@ -4954,6 +4960,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; uint8_t *els_cmd_map; + uint8_t active_cnt = 0; dma_addr_t els_cmd_map_dma; uint8_t cmd_opcode[PUREX_CMD_COUNT]; uint8_t i, index, purex_bit; @@ -4975,10 +4982,20 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) } /* List of Purex ELS */ - cmd_opcode[0] = ELS_FPIN; - cmd_opcode[1] = ELS_RDP; + if (ql2xrdpenable) { + cmd_opcode[active_cnt] = ELS_RDP; + active_cnt++; + } + if (ha->flags.scm_supported_f) { + cmd_opcode[active_cnt] = ELS_FPIN; + active_cnt++; + } + if (ha->flags.edif_enabled) { + cmd_opcode[active_cnt] = ELS_AUTH_ELS; + active_cnt++; + } - for (i = 0; i < PUREX_CMD_COUNT; i++) { + for (i = 0; i < active_cnt; i++) { index = cmd_opcode[i] / 8; purex_bit = cmd_opcode[i] % 8; els_cmd_map[index] |= 1 << purex_bit; @@ -6588,6 +6605,12 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0; + ql_dbg(ql_dbg_disc, vha, 0x2062, + "%8phC SVC Param w3 %02x%02x", + fcport->port_name, + pd->prli_svc_param_word_3[1], + pd->prli_svc_param_word_3[0]); + if (NVME_TARGET(vha->hw, fcport)) { fcport->port_type = FCT_NVME; if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c7caf322f445..078d596dbd49 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -158,6 +158,10 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) int ret = 
QLA_SUCCESS; fc_port_t *fcport; + if (vha->hw->flags.edif_enabled) + /* delete sessions and flush sa_indexes */ + qla2x00_wait_for_sess_deletion(vha); + if (vha->hw->flags.fw_started) ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); @@ -166,7 +170,8 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->logout_on_delete = 0; - qla2x00_mark_all_devices_lost(vha); + if (!vha->hw->flags.edif_enabled) + qla2x00_wait_for_sess_deletion(vha); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->hardware_lock, flags); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 3e5c70a1d969..fdac3f7fa080 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -463,6 +463,10 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) } else if (fd->io_dir == 0) { cmd_pkt->control_flags = 0; } + + if (sp->fcport->edif.enable && fd->io_dir != 0) + cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); + /* Set BIT_13 of control flags for Async event */ if (vha->flags.nvme2_enabled && cmd->sqe.common.opcode == nvme_admin_async_event) { diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 615e44af1ca6..11aad97dfca8 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2166,7 +2166,6 @@ qla82xx_poll(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; - int status = 0; uint32_t stat; uint32_t host_int = 0; uint16_t mb[8]; @@ -2195,7 +2194,6 @@ qla82xx_poll(int irq, void *dev_id) case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); - status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index cedd558f65eb..126ac7e24ea9 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -53,6 +53,11 @@ static struct kmem_cache *ctx_cachep; */ uint ql_errlev = 0x8001; +int ql2xsecenable; +module_param(ql2xsecenable, int, S_IRUGO); +MODULE_PARM_DESC(ql2xsecenable, + "Enable/disable security. 0(Default) - Security disabled. 1 - Security enabled."); + static int ql2xenableclass2; module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR); MODULE_PARM_DESC(ql2xenableclass2, @@ -849,7 +854,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) uint16_t hwq; struct qla_qpair *qpair = NULL; - tag = blk_mq_unique_tag(cmd->request); + tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); hwq = blk_mq_unique_tag_to_hwq(tag); qpair = ha->queue_pair_map[hwq]; @@ -1120,12 +1125,28 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; unsigned long flags; int res; + /* Return 0 = sleep, x=wake */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_init, vha, 0x00ec, "tgt %p, fcport_count=%d\n", vha, vha->fcport_count); res = (vha->fcport_count == 0); + if (res) { + struct fc_port *fcport; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->deleted != QLA_SESS_DELETED) { + /* session(s) may not be fully logged in + * (ie fcport_count=0), but session + * deletion thread(s) may be inflight. 
+ */ + + res = 0; + break; + } + } + } spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return res; @@ -1742,7 +1763,7 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, } spin_lock_irqsave(qp->qp_lock_ptr, *flags); - if (ret_cmd && blk_mq_request_started(cmd->request)) + if (ret_cmd && blk_mq_request_started(scsi_cmd_to_rq(cmd))) sp->done(sp, res); } else { sp->done(sp, res); @@ -2835,6 +2856,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&ha->tgt.sess_lock); spin_lock_init(&ha->tgt.atio_lock); + spin_lock_init(&ha->sadb_lock); + INIT_LIST_HEAD(&ha->sadb_tx_index_list); + INIT_LIST_HEAD(&ha->sadb_rx_index_list); + + spin_lock_init(&ha->sadb_fp_lock); + + if (qla_edif_sadb_build_free_pool(ha)) { + kfree(ha); + goto disable_device; + } + atomic_set(&ha->nvme_active_aen_cnt, 0); /* Clear our data area */ @@ -3460,6 +3492,8 @@ skip_dpc: return 0; probe_failed: + qla_enode_stop(base_vha); + qla_edb_stop(base_vha); if (base_vha->gnl.l) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); @@ -3762,6 +3796,8 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); base_vha->gnl.l = NULL; + qla_enode_stop(base_vha); + qla_edb_stop(base_vha); vfree(base_vha->scan.l); @@ -3795,7 +3831,6 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_free_sysfs_attr(base_vha, true); fc_remove_host(base_vha->host); - qlt_remove_target_resources(ha); scsi_remove_host(base_vha->host); @@ -3867,6 +3902,9 @@ qla2x00_free_device(scsi_qla_host_t *vha) qla82xx_md_free(vha); + qla_edif_sadb_release_free_pool(ha); + qla_edif_sadb_release(ha); + qla2x00_free_queues(ha); } @@ -3919,6 +3957,8 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); qla2x00_schedule_rport_del(vha, fcport); } + + qla_edif_sess_down(vha, fcport); /* * We may need to retry the login, so don't change the state of the * port but do the retries. @@ -3972,15 +4012,20 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, struct req_que **req, struct rsp_que **rsp) { char name[16]; + int rc; ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL); if (!ha->init_cb) goto fail; - if (qlt_mem_alloc(ha) < 0) + rc = btree_init32(&ha->host_map); + if (rc) goto fail_free_init_cb; + if (qlt_mem_alloc(ha) < 0) + goto fail_free_btree; + ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); if (!ha->gid_list) @@ -3990,7 +4035,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->srb_mempool) goto fail_free_gid_list; - if (IS_P3P_TYPE(ha)) { + if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable && IS_QLA28XX(ha))) { /* Allocate cache for CT6 Ctx. 
*/ if (!ctx_cachep) { ctx_cachep = kmem_cache_create("qla2xxx_ctx", @@ -4024,7 +4069,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); - if (IS_P3P_TYPE(ha) || ql2xenabledif) { + if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) && ql2xsecenable)) { ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, DSD_LIST_DMA_POOL_SIZE, 8, 0); if (!ha->dl_dma_pool) { @@ -4264,8 +4309,36 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, goto fail_flt_buffer; } + /* allocate the purex dma pool */ + ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev, + MAX_PAYLOAD, 8, 0); + + if (!ha->purex_dma_pool) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, + "Unable to allocate purex_dma_pool.\n"); + goto fail_flt; + } + + ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16; + ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev, + ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL); + + if (!ha->elsrej.c) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, + "Alloc failed for els reject cmd.\n"); + goto fail_elsrej; + } + ha->elsrej.c->er_cmd = ELS_LS_RJT; + ha->elsrej.c->er_reason = ELS_RJT_BUSY; + ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA; return 0; +fail_elsrej: + dma_pool_destroy(ha->purex_dma_pool); +fail_flt: + dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + ha->flt, ha->flt_dma); + fail_flt_buffer: dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, ha->sfp_data_dma); @@ -4356,6 +4429,8 @@ fail_free_gid_list: ha->gid_list_dma = 0; fail_free_tgt_mem: qlt_mem_free(ha); +fail_free_btree: + btree_destroy32(&ha->host_map); fail_free_init_cb: dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); @@ -4772,10 +4847,21 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->dif_bundl_pool = NULL; qlt_mem_free(ha); + qla_remove_hostmap(ha); if (ha->init_cb) dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); + + dma_pool_destroy(ha->purex_dma_pool); + ha->purex_dma_pool = NULL; + + if (ha->elsrej.c) { + dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, + ha->elsrej.c, ha->elsrej.cdma); + ha->elsrej.c = NULL; + } + ha->init_cb = NULL; ha->init_cb_dma = 0; @@ -4837,6 +4923,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, spin_lock_init(&vha->cmd_list_lock); init_waitqueue_head(&vha->fcport_waitQ); init_waitqueue_head(&vha->vref_waitq); + qla_enode_init(vha); + qla_edb_init(vha); + vha->gnl.size = sizeof(struct get_name_list_extended) * (ha->max_loop_id + 1); @@ -5327,6 +5416,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, e->u.fcport.fcport, false); break; + case QLA_EVT_SA_REPLACE: + qla24xx_issue_sa_replace_iocb(vha, e); + break; } if (rc == EAGAIN) { @@ -5376,6 +5468,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) if (atomic_read(&fcport->state) != FCS_ONLINE && fcport->login_retry) { if (fcport->scan_state != QLA_FCPORT_FOUND || + fcport->disc_state == DSC_LOGIN_AUTH_PEND || fcport->disc_state == DSC_LOGIN_COMPLETE) continue; diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 060c89237777..a0aeba69513d 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -2936,7 +2936,6 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, liter += dburst - 1; faddr += dburst - 1; dwptr += dburst - 1; - continue; } 
write_protect: diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index eb47140a899f..c3a589659658 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -184,8 +184,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) return QLA_SUCCESS; } -static inline -struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, +struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha, be_id_t d_id) { struct scsi_qla_host *host; @@ -198,7 +197,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, key = be_to_port_id(d_id).b24; - host = btree_lookup32(&vha->hw->tgt.host_map, key); + host = btree_lookup32(&vha->hw->host_map, key); if (!host) ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005, "Unable to find host %06x\n", key); @@ -299,7 +298,7 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha, goto abort; } - host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); + host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); if (host != NULL) { ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f, "Requeuing unknown ATIO_TYPE7 %p\n", u); @@ -348,7 +347,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, switch (atio->u.raw.entry_type) { case ATIO_TYPE7: { - struct scsi_qla_host *host = qlt_find_host_by_d_id(vha, + struct scsi_qla_host *host = qla_find_host_by_d_id(vha, atio->u.isp24.fcp_hdr.d_id); if (unlikely(NULL == host)) { ql_dbg(ql_dbg_tgt, vha, 0xe03e, @@ -577,6 +576,18 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res) sp->fcport->logout_on_delete = 1; sp->fcport->plogi_nack_done_deadline = jiffies + HZ; sp->fcport->send_els_logo = 0; + + if (sp->fcport->flags & FCF_FCSP_DEVICE) { + ql_dbg(ql_dbg_edif, vha, 0x20ef, + "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__, + sp->fcport->port_name); + qla2x00_set_fcport_disc_state(sp->fcport, + DSC_LOGIN_AUTH_PEND); + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, + sp->fcport->d_id.b24); + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24, + 0, sp->fcport); + } break; case SRB_NACK_PRLI: @@ -624,6 +635,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, case SRB_NACK_PLOGI: fcport->fw_login_state = DSC_LS_PLOGI_PEND; c = "PLOGI"; + if (vha->hw->flags.edif_enabled && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { + fcport->flags |= FCF_FCSP_DEVICE; + } break; case SRB_NACK_PRLI: fcport->fw_login_state = DSC_LS_PRLI_PEND; @@ -693,7 +708,12 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) void qla24xx_delete_sess_fn(struct work_struct *work) { fc_port_t *fcport = container_of(work, struct fc_port, del_work); - struct qla_hw_data *ha = fcport->vha->hw; + struct qla_hw_data *ha = NULL; + + if (!fcport || !fcport->vha || !fcport->vha->hw) + return; + + ha = fcport->vha->hw; if (fcport->se_sess) { ha->tgt.tgt_ops->shutdown_sess(fcport); @@ -965,6 +985,19 @@ void qlt_free_session_done(struct work_struct *work) sess->send_els_logo); if (!IS_SW_RESV_ADDR(sess->d_id)) { + if (ha->flags.edif_enabled && + (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) { + if (!ha->flags.host_shutting_down) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC calling qla2x00_release_all_sadb\n", + __func__, sess->port_name); + qla2x00_release_all_sadb(vha, sess); + } else { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s bypassing release_all_sadb\n", + __func__); + } + } 
qla2x00_mark_device_lost(vha, sess, 0); if (sess->send_els_logo) { @@ -972,6 +1005,7 @@ void qlt_free_session_done(struct work_struct *work) logo.id = sess->d_id; logo.cmd_count = 0; + INIT_LIST_HEAD(&logo.list); if (!own) qlt_send_first_logo(vha, &logo); sess->send_els_logo = 0; @@ -982,6 +1016,7 @@ void qlt_free_session_done(struct work_struct *work) if (!own || (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) { + sess->logout_completed = 0; rc = qla2x00_post_async_logout_work(vha, sess, NULL); if (rc != QLA_SUCCESS) @@ -1278,8 +1313,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) qla24xx_chk_fcp_state(sess); ql_dbg(ql_log_warn, sess->vha, 0xe001, - "Scheduling sess %p for deletion %8phC\n", - sess, sess->port_name); + "Scheduling sess %p for deletion %8phC fc4_type %x\n", + sess, sess->port_name, sess->fc4_type); WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); } @@ -1720,6 +1755,12 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair, nack->u.isp24.srr_reject_code_expl = srr_explan; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + /* TODO qualify this with EDIF enable */ + if (ntfy->u.isp24.status_subcode == ELS_PLOGI && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { + nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); + } + ql_dbg(ql_dbg_tgt, vha, 0xe005, "qla_target(%d): Sending 24xx Notify Ack %d\n", vha->vp_idx, nack->u.isp24.status); @@ -2571,6 +2612,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, struct ctio7_to_24xx *pkt; struct atio_from_isp *atio = &prm->cmd->atio; uint16_t temp; + struct qla_tgt_cmd *cmd = prm->cmd; pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr; prm->pkt = pkt; @@ -2603,6 +2645,15 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, pkt->u.status0.ox_id = cpu_to_le16(temp); pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); + if (cmd->edif) { + if (cmd->dma_data_direction == DMA_TO_DEVICE) + prm->cmd->sess->edif.rx_bytes += cmd->bufflen; + if (cmd->dma_data_direction == DMA_FROM_DEVICE) + prm->cmd->sess->edif.tx_bytes += cmd->bufflen; + + pkt->u.status0.edif_flags |= EF_EN_EDIF; + } + return 0; } @@ -3293,8 +3344,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (xmit_type & QLA_TGT_XMIT_STATUS) { pkt->u.status0.scsi_status = cpu_to_le16(prm.rq_result); - pkt->u.status0.residual = - cpu_to_le32(prm.residual); + if (!cmd->edif) + pkt->u.status0.residual = + cpu_to_le32(prm.residual); + pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_SEND_STATUS); if (qlt_need_explicit_conf(cmd, 0)) { @@ -3941,6 +3994,12 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, if (cmd == NULL) return; + if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) && + cmd->sess) { + qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess, + (struct ctio7_from_24xx *)ctio); + } + se_cmd = &cmd->se_cmd; cmd->cmd_sent_to_fw = 0; @@ -4011,6 +4070,16 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, qlt_handle_dif_error(qpair, cmd, ctio); return; } + + case CTIO_FAST_AUTH_ERR: + case CTIO_FAST_INCOMP_PAD_LEN: + case CTIO_FAST_INVALID_REQ: + case CTIO_FAST_SPI_ERR: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, + "qla_target(%d): CTIO with EDIF error status 0x%x received (state %x, se_cmd %p\n", + vha->vp_idx, status, cmd->state, se_cmd); + break; + default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", @@ -4312,6 +4381,7 @@ static struct qla_tgt_cmd 
*qlt_get_tag(scsi_qla_host_t *vha, qlt_assign_qpair(vha, cmd); cmd->reset_count = vha->hw->base_qpair->chip_reset; cmd->vp_idx = vha->vp_idx; + cmd->edif = sess->edif.enable; return cmd; } @@ -4727,6 +4797,17 @@ static int qlt_handle_login(struct scsi_qla_host *vha, goto out; } + if (vha->hw->flags.edif_enabled && + !(vha->e_dbell.db_flags & EDB_ACTIVE) && + iocb->u.isp24.status_subcode == ELS_PLOGI && + !(le16_to_cpu(iocb->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d Term INOT due to app not available lid=%d, NportID %06X ", + __func__, __LINE__, loop_id, port_id.b24); + qlt_send_term_imm_notif(vha, iocb, 1); + goto out; + } + pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); if (!pla) { ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, @@ -4792,6 +4873,16 @@ static int qlt_handle_login(struct scsi_qla_host *vha, qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN); sess->d_id = port_id; sess->login_gen++; + sess->loop_id = loop_id; + + if (iocb->u.isp24.status_subcode == ELS_PLOGI) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %8phC - send port online\n", + __func__, sess->port_name); + + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, + sess->d_id.b24); + } if (iocb->u.isp24.status_subcode == ELS_PRLI) { sess->fw_login_state = DSC_LS_PRLI_PEND; @@ -6444,15 +6535,15 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) return 0; } -void qlt_remove_target_resources(struct qla_hw_data *ha) +void qla_remove_hostmap(struct qla_hw_data *ha) { struct scsi_qla_host *node; u32 key = 0; - btree_for_each_safe32(&ha->tgt.host_map, key, node) - btree_remove32(&ha->tgt.host_map, key); + btree_for_each_safe32(&ha->host_map, key, node) + btree_remove32(&ha->host_map, key); - btree_destroy32(&ha->tgt.host_map); + btree_destroy32(&ha->host_map); } static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, @@ -7080,8 +7171,7 @@ qlt_modify_vp_config(struct scsi_qla_host *vha, void qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) { - int rc; - + mutex_init(&base_vha->vha_tgt.tgt_mutex); if (!QLA_TGT_MODE_ENABLED()) return; @@ -7094,7 +7184,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; } - mutex_init(&base_vha->vha_tgt.tgt_mutex); mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); INIT_LIST_HEAD(&base_vha->unknown_atio_list); @@ -7103,11 +7192,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) qlt_clear_mode(base_vha); - rc = btree_init32(&ha->tgt.host_map); - if (rc) - ql_log(ql_log_info, base_vha, 0xd03d, - "Unable to initialize ha->host_map btree\n"); - qlt_update_vp_map(base_vha, SET_VP_IDX); } @@ -7228,21 +7312,20 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) u32 key; int rc; - if (!QLA_TGT_MODE_ENABLED()) - return; - key = vha->d_id.b24; switch (cmd) { case SET_VP_IDX: + if (!QLA_TGT_MODE_ENABLED()) + return; vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; break; case SET_AL_PA: - slot = btree_lookup32(&vha->hw->tgt.host_map, key); + slot = btree_lookup32(&vha->hw->host_map, key); if (!slot) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018, "Save vha in host_map %p %06x\n", vha, key); - rc = btree_insert32(&vha->hw->tgt.host_map, + rc = btree_insert32(&vha->hw->host_map, key, vha, GFP_ATOMIC); if (rc) ql_log(ql_log_info, vha, 0xd03e, @@ -7252,17 +7335,19 @@ qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, "replace existing vha in 
host_map %p %06x\n", vha, key); - btree_update32(&vha->hw->tgt.host_map, key, vha); + btree_update32(&vha->hw->host_map, key, vha); break; case RESET_VP_IDX: + if (!QLA_TGT_MODE_ENABLED()) + return; vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; break; case RESET_AL_PA: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, "clear vha in host_map %p %06x\n", vha, key); - slot = btree_lookup32(&vha->hw->tgt.host_map, key); + slot = btree_lookup32(&vha->hw->host_map, key); if (slot) - btree_remove32(&vha->hw->tgt.host_map, key); + btree_remove32(&vha->hw->host_map, key); vha->d_id.b24 = 0; break; } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 01620f3eab39..156b950ca7e7 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -176,6 +176,7 @@ struct nack_to_isp { uint8_t reserved[2]; __le16 ox_id; } __packed; +#define NOTIFY_ACK_FLAGS_FCSP BIT_5 #define NOTIFY_ACK_FLAGS_TERMINATE BIT_3 #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 #define NOTIFY_ACK_SRR_FLAGS_REJECT 1 @@ -238,6 +239,10 @@ struct ctio_to_2xxx { #define CTIO_PORT_LOGGED_OUT 0x29 #define CTIO_PORT_CONF_CHANGED 0x2A #define CTIO_SRR_RECEIVED 0x45 +#define CTIO_FAST_AUTH_ERR 0x63 +#define CTIO_FAST_INCOMP_PAD_LEN 0x65 +#define CTIO_FAST_INVALID_REQ 0x66 +#define CTIO_FAST_SPI_ERR 0x67 #endif #ifndef CTIO_RET_TYPE @@ -408,7 +413,16 @@ struct ctio7_to_24xx { struct { __le16 reserved1; __le16 flags; - __le32 residual; + union { + __le32 residual; + struct { + uint8_t rsvd1; + uint8_t edif_flags; +#define EF_EN_EDIF BIT_0 +#define EF_NEW_SA BIT_1 + uint16_t rsvd2; + }; + }; __le16 ox_id; __le16 scsi_status; __le32 relative_offset; @@ -446,7 +460,7 @@ struct ctio7_from_24xx { uint8_t vp_index; uint8_t reserved1[5]; __le32 exchange_address; - __le16 reserved2; + __le16 edif_sa_index; __le16 flags; __le32 residual; __le16 ox_id; @@ -875,6 +889,7 @@ struct qla_tgt_cmd { unsigned int term_exchg:1; unsigned int cmd_sent_to_fw:1; unsigned int cmd_in_wq:1; + unsigned int edif:1; /* * This variable may be set from outside the LIO and I/O completion diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index da11829fa12d..2e05dd74b5cb 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -6,9 +6,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.02.00.106-k" +#define QLA2XXX_VERSION "10.02.00.107-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 2 #define QLA_DRIVER_PATCH_VER 0 -#define QLA_DRIVER_BETA_VER 106 +#define QLA_DRIVER_BETA_VER 107 diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index f786ac2f5548..301bc09c8365 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -119,8 +119,8 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha) * the interrupt_handler to think there are responses to be * processed when there aren't. 
*/ - ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0); - ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0); + ha->shadow_regs->req_q_out = cpu_to_le32(0); + ha->shadow_regs->rsp_q_in = cpu_to_le32(0); wmb(); writel(0, &ha->reg->req_q_in); diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index cbd1e6ffcd67..28eab07935ba 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -160,7 +160,7 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb, if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { /* No data being transferred */ - cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0); + cmd_entry->ttlByteCnt = cpu_to_le32(0); return; } @@ -288,7 +288,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) /* Acquire hardware specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); - index = (uint32_t)cmd->request->tag; + index = scsi_cmd_to_rq(cmd)->tag; /* * Check to see if adapter is online before placing request on diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 187d78aa4f67..cd71074f3abe 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -645,8 +645,8 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) /* Fill in the request and response queue information. */ init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out); init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in); - init_fw_cb->rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH); - init_fw_cb->compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH); + init_fw_cb->rqq_len = cpu_to_le16(REQUEST_QUEUE_DEPTH); + init_fw_cb->compq_len = cpu_to_le16(RESPONSE_QUEUE_DEPTH); init_fw_cb->rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma)); init_fw_cb->rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma)); init_fw_cb->compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma)); @@ -656,20 +656,20 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) /* Set up required options. 
*/ init_fw_cb->fw_options |= - __constant_cpu_to_le16(FWOPT_SESSION_MODE | - FWOPT_INITIATOR_MODE); + cpu_to_le16(FWOPT_SESSION_MODE | + FWOPT_INITIATOR_MODE); if (is_qla80XX(ha)) init_fw_cb->fw_options |= - __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB); + cpu_to_le16(FWOPT_ENABLE_CRBDB); - init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); + init_fw_cb->fw_options &= cpu_to_le16(~FWOPT_TARGET_MODE); init_fw_cb->add_fw_options = 0; init_fw_cb->add_fw_options |= - __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT); + cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT); init_fw_cb->add_fw_options |= - __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE); + cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE); if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != QLA_SUCCESS) { @@ -1613,7 +1613,7 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); - chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); + chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE); exit_get_chap: dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); @@ -1655,7 +1655,7 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, chap_table->secret_len = strlen(password); strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1); strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1); - chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); + chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE); if (is_qla40XX(ha)) { chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table); @@ -1721,7 +1721,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, mutex_lock(&ha->chap_sem); chap_table = (struct ql4_chap_table *)ha->chap_list + chap_index; - if (chap_table->cookie != __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + if (chap_table->cookie != cpu_to_le16(CHAP_VALID_COOKIE)) { rval = QLA_ERROR; goto exit_unlock_uni_chap; } @@ -1784,7 +1784,7 @@ int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, for (i = 0; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if (chap_table->cookie != - __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + cpu_to_le16(CHAP_VALID_COOKIE)) { if (i > MAX_RESRV_CHAP_IDX && free_index == -1) free_index = i; continue; @@ -2105,18 +2105,18 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha, if (conn->max_recv_dlength) fw_ddb_entry->iscsi_max_rcv_data_seg_len = - __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS)); + cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS)); if (sess->max_r2t) fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); if (sess->first_burst) fw_ddb_entry->iscsi_first_burst_len = - __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS)); + cpu_to_le16((sess->first_burst / BYTE_UNITS)); if (sess->max_burst) fw_ddb_entry->iscsi_max_burst_len = - __constant_cpu_to_le16((sess->max_burst / BYTE_UNITS)); + cpu_to_le16((sess->max_burst / BYTE_UNITS)); if (sess->time2wait) fw_ddb_entry->iscsi_def_time2wait = diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 66a487795c53..47adff9f0506 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -3658,7 +3658,7 @@ qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr, "Do ROM fast read failed\n"); goto done_read; } - dwptr[i] = __constant_cpu_to_le32(val); + dwptr[i] = 
cpu_to_le32(val); } done_read: @@ -3721,9 +3721,9 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr) goto no_flash_data; } - if (*wptr == __constant_cpu_to_le16(0xffff)) + if (*wptr == cpu_to_le16(0xffff)) goto no_flash_data; - if (flt->version != __constant_cpu_to_le16(1)) { + if (flt->version != cpu_to_le16(1)) { DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: " "version=0x%x length=0x%x checksum=0x%x.\n", le16_to_cpu(flt->version), le16_to_cpu(flt->length), @@ -3826,7 +3826,7 @@ qla4_82xx_get_fdt_info(struct scsi_qla_host *ha) qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, hw->flt_region_fdt << 2, OPTROM_BURST_SIZE); - if (*wptr == __constant_cpu_to_le16(0xffff)) + if (*wptr == cpu_to_le16(0xffff)) goto no_flash_data; if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || @@ -3883,7 +3883,7 @@ qla4_82xx_get_idc_param(struct scsi_qla_host *ha) qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, QLA82XX_IDC_PARAM_ADDR , 8); - if (*wptr == __constant_cpu_to_le32(0xffffffff)) { + if (*wptr == cpu_to_le32(0xffffffff)) { ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT; ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT; } else { diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 6ee7ea4c27e0..f1ea65c6e5f5 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -702,7 +702,7 @@ static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, *chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; if ((*chap_entry)->cookie != - __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + cpu_to_le16(CHAP_VALID_COOKIE)) { *chap_entry = NULL; } else { rval = QLA_SUCCESS; @@ -745,7 +745,7 @@ static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, chap_table = (struct ql4_chap_table *)ha->chap_list + i; if ((chap_table->cookie != - __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && + cpu_to_le16(CHAP_VALID_COOKIE)) && (i > MAX_RESRV_CHAP_IDX)) { free_index = i; break; @@ -794,7 +794,7 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, for (i = chap_tbl_idx; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if (chap_table->cookie != - __constant_cpu_to_le16(CHAP_VALID_COOKIE)) + cpu_to_le16(CHAP_VALID_COOKIE)) continue; chap_rec->chap_tbl_idx = i; @@ -923,7 +923,7 @@ static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx) goto exit_delete_chap; } - chap_table->cookie = __constant_cpu_to_le16(0xFFFF); + chap_table->cookie = cpu_to_le16(0xFFFF); offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * sizeof(struct ql4_chap_table)); @@ -6043,7 +6043,7 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, for (i = 0; i < max_chap_entries; i++) { chap_table = (struct ql4_chap_table *)ha->chap_list + i; if (chap_table->cookie != - __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + cpu_to_le16(CHAP_VALID_COOKIE)) { continue; } @@ -9282,7 +9282,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd) DEBUG2(printk(KERN_INFO "scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x," "dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, - cmd, jiffies, cmd->request->timeout / HZ, + cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ, ha->dpc_flags, cmd->result, cmd->allowed)); rval = qla4xxx_isp_check_reg(ha); @@ -9349,7 +9349,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd) DEBUG2(printk(KERN_INFO "scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, " "to=%x,dpc_flags=%lx, 
status=%x allowed=%d\n", - ha->host_no, cmd, jiffies, cmd->request->timeout / HZ, + ha->host_no, cmd, jiffies, scsi_cmd_to_rq(cmd)->timeout / HZ, ha->dpc_flags, cmd->result, cmd->allowed)); rval = qla4xxx_isp_check_reg(ha); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index d84e218d32cb..8e7e833a36cc 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -890,7 +890,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd, cmd->control_flags |= CFLAG_WRITE; else cmd->control_flags |= CFLAG_READ; - cmd->time_out = Cmnd->request->timeout/HZ; + cmd->time_out = scsi_cmd_to_rq(Cmnd)->timeout / HZ; memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len); } diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index d26025cf5de3..b241f9e3885c 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -190,7 +190,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) "(result %x)\n", cmd->result)); good_bytes = scsi_bufflen(cmd); - if (!blk_rq_is_passthrough(cmd->request)) { + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) { int old_good_bytes = good_bytes; drv = scsi_cmd_to_driver(cmd); if (drv->done) diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c new file mode 100644 index 000000000000..81c3853a2a80 --- /dev/null +++ b/drivers/scsi/scsi_bsg.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bsg.h> +#include <scsi/scsi.h> +#include <scsi/scsi_ioctl.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/sg.h> +#include "scsi_priv.h" + +#define uptr64(val) ((void __user *)(uintptr_t)(val)) + +static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr, + fmode_t mode, unsigned int timeout) +{ + struct scsi_request *sreq; + struct request *rq; + struct bio *bio; + int ret; + + if (hdr->protocol != BSG_PROTOCOL_SCSI || + hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD) + return -EINVAL; + if (hdr->dout_xfer_len && hdr->din_xfer_len) { + pr_warn_once("BIDI support in bsg has been removed.\n"); + return -EOPNOTSUPP; + } + + rq = blk_get_request(q, hdr->dout_xfer_len ? 
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + rq->timeout = timeout; + + ret = -ENOMEM; + sreq = scsi_req(rq); + sreq->cmd_len = hdr->request_len; + if (sreq->cmd_len > BLK_MAX_CDB) { + sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL); + if (!sreq->cmd) + goto out_put_request; + } + + ret = -EFAULT; + if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len)) + goto out_free_cmd; + ret = -EPERM; + if (!scsi_cmd_allowed(sreq->cmd, mode)) + goto out_free_cmd; + + ret = 0; + if (hdr->dout_xfer_len) { + ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp), + hdr->dout_xfer_len, GFP_KERNEL); + } else if (hdr->din_xfer_len) { + ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp), + hdr->din_xfer_len, GFP_KERNEL); + } + + if (ret) + goto out_free_cmd; + + bio = rq->bio; + blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL)); + + /* + * fill in all the output members + */ + hdr->device_status = sreq->result & 0xff; + hdr->transport_status = host_byte(sreq->result); + hdr->driver_status = 0; + if (scsi_status_is_check_condition(sreq->result)) + hdr->driver_status = DRIVER_SENSE; + hdr->info = 0; + if (hdr->device_status || hdr->transport_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->response_len = 0; + + if (sreq->sense_len && hdr->response) { + int len = min_t(unsigned int, hdr->max_response_len, + sreq->sense_len); + + if (copy_to_user(uptr64(hdr->response), sreq->sense, len)) + ret = -EFAULT; + else + hdr->response_len = len; + } + + if (rq_data_dir(rq) == READ) + hdr->din_resid = sreq->resid_len; + else + hdr->dout_resid = sreq->resid_len; + + blk_rq_unmap_user(bio); + +out_free_cmd: + scsi_req_free_cmd(scsi_req(rq)); +out_put_request: + blk_put_request(rq); + return ret; +} + +struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev) +{ + return bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev, + dev_name(&sdev->sdev_gendev), scsi_bsg_sg_io_fn); +} diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c index 90349498f686..6e50e81a8216 100644 --- a/drivers/scsi/scsi_common.c +++ b/drivers/scsi/scsi_common.c @@ -7,9 +7,18 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> +#include <linux/module.h> #include <asm/unaligned.h> #include <scsi/scsi_common.h> +MODULE_LICENSE("GPL v2"); + +/* Command group 3 is reserved and should never be used. */ +const unsigned char scsi_command_size_tbl[8] = { + 6, 10, 10, 12, 16, 12, 10, 10 +}; +EXPORT_SYMBOL(scsi_command_size_tbl); + /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. * You may not alter any existing entry (although adding new ones is * encouraged once assigned by ANSI/INCITS T10). 
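
Note on the new scsi_bsg.c above: scsi_bsg_sg_io_fn() is the handler behind the /dev/bsg nodes enabled by CONFIG_BLK_DEV_BSG, and it consumes the SG v4 header (struct sg_io_v4) exactly as user space fills it in. A minimal user-space sketch of driving that path is shown below; the bsg node path, transfer size and timeout are illustrative assumptions, not values taken from the patch.

/* Minimal SG v4 example: send INQUIRY through a bsg node.
 * The node name is illustrative (any /dev/bsg/<h:c:t:l> works). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>
#include <scsi/sg.h>            /* SG_IO */

int main(void)
{
        unsigned char cdb[6]    = { 0x12, 0, 0, 0, 96, 0 };  /* INQUIRY, 96 bytes */
        unsigned char data[96]  = { 0 };
        unsigned char sense[32] = { 0 };
        struct sg_io_v4 hdr;
        int fd, ret;

        fd = open("/dev/bsg/0:0:0:0", O_RDWR);
        if (fd < 0)
                return 1;

        memset(&hdr, 0, sizeof(hdr));
        hdr.guard            = 'Q';                     /* identifies an SG v4 header */
        hdr.protocol         = BSG_PROTOCOL_SCSI;
        hdr.subprotocol      = BSG_SUB_PROTOCOL_SCSI_CMD;
        hdr.request_len      = sizeof(cdb);
        hdr.request          = (unsigned long)cdb;
        hdr.din_xfer_len     = sizeof(data);            /* device-to-host transfer */
        hdr.din_xferp        = (unsigned long)data;
        hdr.max_response_len = sizeof(sense);
        hdr.response         = (unsigned long)sense;
        hdr.timeout          = 5000;                    /* milliseconds */

        ret = ioctl(fd, SG_IO, &hdr);
        if (ret == 0 && hdr.device_status == 0)
                printf("INQUIRY ok, %u bytes returned\n",
                       hdr.din_xfer_len - hdr.din_resid);

        close(fd);
        return ret;
}

Only one data direction may be set per request; the handler above rejects bidirectional transfers with -EOPNOTSUPP, which is why the sketch fills in the din_* fields only.
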
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 5b3a20a140f9..31529d8add0d 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3076,6 +3076,7 @@ static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector, static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec, unsigned int sectors, u32 ei_lba) { + int ret = 0; unsigned int i; sector_t sector; struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *) @@ -3083,26 +3084,33 @@ static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec, struct t10_pi_tuple *sdt; for (i = 0; i < sectors; i++, ei_lba++) { - int ret; - sector = start_sec + i; sdt = dif_store(sip, sector); if (sdt->app_tag == cpu_to_be16(0xffff)) continue; - ret = dif_verify(sdt, lba2fake_store(sip, sector), sector, - ei_lba); - if (ret) { - dif_errors++; - return ret; + /* + * Because scsi_debug acts as both initiator and + * target we proceed to verify the PI even if + * RDPROTECT=3. This is done so the "initiator" knows + * which type of error to return. Otherwise we would + * have to iterate over the PI twice. + */ + if (scp->cmnd[1] >> 5) { /* RDPROTECT */ + ret = dif_verify(sdt, lba2fake_store(sip, sector), + sector, ei_lba); + if (ret) { + dif_errors++; + break; + } } } dif_copy_prot(scp, start_sec, sectors, true); dix_reads++; - return 0; + return ret; } static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) @@ -3196,12 +3204,29 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) /* DIX + T10 DIF */ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { - int prot_ret = prot_verify_read(scp, lba, num, ei_lba); - - if (prot_ret) { - read_unlock(macc_lckp); - mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, prot_ret); - return illegal_condition_result; + switch (prot_verify_read(scp, lba, num, ei_lba)) { + case 1: /* Guard tag error */ + if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ + read_unlock(macc_lckp); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + return check_condition_result; + } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { + read_unlock(macc_lckp); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + return illegal_condition_result; + } + break; + case 3: /* Reference tag error */ + if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */ + read_unlock(macc_lckp); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); + return check_condition_result; + } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) { + read_unlock(macc_lckp); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); + return illegal_condition_result; + } + break; } } @@ -3232,28 +3257,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) return 0; } -static void dump_sector(unsigned char *buf, int len) -{ - int i, j, n; - - pr_err(">>> Sector Dump <<<\n"); - for (i = 0 ; i < len ; i += 16) { - char b[128]; - - for (j = 0, n = 0; j < 16; j++) { - unsigned char c = buf[i+j]; - - if (c >= 0x20 && c < 0x7e) - n += scnprintf(b + n, sizeof(b) - n, - " %c ", buf[i+j]); - else - n += scnprintf(b + n, sizeof(b) - n, - "%02x ", buf[i+j]); - } - pr_err("%04d: %s\n", i, b); - } -} - static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, unsigned int sectors, u32 ei_lba) { @@ -3299,10 +3302,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, sdt = piter.addr + ppage_offset; daddr = diter.addr + dpage_offset; - ret = dif_verify(sdt, daddr, sector, ei_lba); - if (ret) { - dump_sector(daddr, sdebug_sector_size); - 
goto out; + if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */ + ret = dif_verify(sdt, daddr, sector, ei_lba); + if (ret) + goto out; } sector++; @@ -3480,12 +3483,29 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) /* DIX + T10 DIF */ if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) { - int prot_ret = prot_verify_write(scp, lba, num, ei_lba); - - if (prot_ret) { - write_unlock(macc_lckp); - mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, prot_ret); - return illegal_condition_result; + switch (prot_verify_write(scp, lba, num, ei_lba)) { + case 1: /* Guard tag error */ + if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) { + write_unlock(macc_lckp); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); + return illegal_condition_result; + } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ + write_unlock(macc_lckp); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); + return check_condition_result; + } + break; + case 3: /* Reference tag error */ + if (scp->prot_flags & SCSI_PROT_REF_CHECK) { + write_unlock(macc_lckp); + mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3); + return illegal_condition_result; + } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */ + write_unlock(macc_lckp); + mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3); + return check_condition_result; + } + break; } } @@ -4702,7 +4722,7 @@ fini: static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd) { u16 hwq; - u32 tag = blk_mq_unique_tag(cmnd->request); + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); hwq = blk_mq_unique_tag_to_hwq(tag); @@ -4715,7 +4735,7 @@ static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd) static u32 get_tag(struct scsi_cmnd *cmnd) { - return blk_mq_unique_tag(cmnd->request); + return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)); } /* Queued (deferred) command completions converge here. 
*/ @@ -5364,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, { bool new_sd_dp; bool inject = false; - bool hipri = (cmnd->request->cmd_flags & REQ_HIPRI); + bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI; int k, num_in_q, qdepth; unsigned long iflags; u64 ns_from_boot = 0; @@ -5567,8 +5587,9 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if (sdebug_statistics) sd_dp->issuing_cpu = raw_smp_processor_id(); if (unlikely(sd_dp->aborted)) { - sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag); - blk_abort_request(cmnd->request); + sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", + scsi_cmd_to_rq(cmnd)->tag); + blk_abort_request(scsi_cmd_to_rq(cmnd)); atomic_set(&sdeb_inject_pending, 0); sd_dp->aborted = false; } @@ -7394,7 +7415,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, (u32)cmd[k]); } sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name, - blk_mq_unique_tag(scp->request), b); + blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b); } if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY))) return SCSI_MLQUEUE_HOST_BUSY; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index d33355ab6e14..c7080454aea9 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -171,6 +171,7 @@ static struct { {"FUJITSU", "ETERNUS_DXM", "*", BLIST_RETRY_ASC_C1}, {"Generic", "USB SD Reader", "1.00", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"Generic", "USB Storage-SMC", NULL, BLIST_FORCELUN | BLIST_INQUIRY_36}, /* FW: 0180 and 0207 */ + {"Generic", "Ultra HS-SD/MMC", "2.09", BLIST_IGN_MEDIA_CHANGE | BLIST_INQUIRY_36}, {"HITACHI", "DF400", "*", BLIST_REPORTLUN2}, {"HITACHI", "DF500", "*", BLIST_REPORTLUN2}, {"HITACHI", "DISK-SUBSYSTEM", "*", BLIST_REPORTLUN2}, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 58a252c38992..b6c86cce57bf 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -242,7 +242,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) */ static void scsi_eh_reset(struct scsi_cmnd *scmd) { - if (!blk_rq_is_passthrough(scmd->request)) { + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) { struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); if (sdrv->eh_reset) sdrv->eh_reset(scmd); @@ -1182,7 +1182,7 @@ static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd) static enum scsi_disposition scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn) { - if (!blk_rq_is_passthrough(scmd->request)) { + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) { struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); if (sdrv->eh_action) rtn = sdrv->eh_action(scmd, rtn); @@ -1750,21 +1750,23 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q, */ int scsi_noretry_cmd(struct scsi_cmnd *scmd) { + struct request *req = scsi_cmd_to_rq(scmd); + switch (host_byte(scmd->result)) { case DID_OK: break; case DID_TIME_OUT: goto check_type; case DID_BUS_BUSY: - return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT); + return req->cmd_flags & REQ_FAILFAST_TRANSPORT; case DID_PARITY: - return (scmd->request->cmd_flags & REQ_FAILFAST_DEV); + return req->cmd_flags & REQ_FAILFAST_DEV; case DID_ERROR: if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT) return 0; fallthrough; case DID_SOFT_ERROR: - return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER); + return req->cmd_flags & REQ_FAILFAST_DRIVER; } if (!scsi_status_is_check_condition(scmd->result)) @@ -1775,8 +1777,7 @@ 
check_type: * assume caller has checked sense and determined * the check condition was retryable. */ - if (scmd->request->cmd_flags & REQ_FAILFAST_DEV || - blk_rq_is_passthrough(scmd->request)) + if (req->cmd_flags & REQ_FAILFAST_DEV || blk_rq_is_passthrough(req)) return 1; return 0; @@ -2376,7 +2377,6 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) scmd = (struct scsi_cmnd *)(rq + 1); scsi_init_command(dev, scmd); - scmd->request = rq; scmd->cmnd = scsi_req(rq)->cmd; scmd->scsi_done = scsi_reset_provider_done_command; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 0d13610cd6bf..7b2b0a1581f4 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -14,6 +14,7 @@ #include <linux/mm.h> #include <linux/string.h> #include <linux/uaccess.h> +#include <linux/cdrom.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -63,29 +64,6 @@ static int ioctl_probe(struct Scsi_Host *host, void __user *buffer) return 1; } -/* - - * The SCSI_IOCTL_SEND_COMMAND ioctl sends a command out to the SCSI host. - * The IOCTL_NORMAL_TIMEOUT and NORMAL_RETRIES variables are used. - * - * dev is the SCSI device struct ptr, *(int *) arg is the length of the - * input data, if any, not including the command string & counts, - * *((int *)arg + 1) is the output buffer size in bytes. - * - * *(char *) ((int *) arg)[2] the actual command byte. - * - * Note that if more than MAX_BUF bytes are requested to be transferred, - * the ioctl will fail with error EINVAL. - * - * This size *does not* include the initial lengths that were passed. - * - * The SCSI command is read from the memory location immediately after the - * length words, and the input data is right after the command. The SCSI - * routines know the command size based on the opcode decode. - * - * The output area is then filled in starting from the command byte. - */ - static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, int timeout, int retries) { @@ -189,10 +167,732 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) ? -EFAULT: 0; } +static int sg_get_version(int __user *p) +{ + static const int sg_version_num = 30527; + return put_user(sg_version_num, p); +} -static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg) +static int sg_set_timeout(struct scsi_device *sdev, int __user *p) { - char scsi_cmd[MAX_COMMAND_SIZE]; + int timeout, err = get_user(timeout, p); + + if (!err) + sdev->sg_timeout = clock_t_to_jiffies(timeout); + + return err; +} + +static int sg_get_reserved_size(struct scsi_device *sdev, int __user *p) +{ + int val = min(sdev->sg_reserved_size, + queue_max_bytes(sdev->request_queue)); + + return put_user(val, p); +} + +static int sg_set_reserved_size(struct scsi_device *sdev, int __user *p) +{ + int size, err = get_user(size, p); + + if (err) + return err; + + if (size < 0) + return -EINVAL; + + sdev->sg_reserved_size = min_t(unsigned int, size, + queue_max_bytes(sdev->request_queue)); + return 0; +} + +/* + * will always return that we are ATAPI even for a real SCSI drive, I'm not + * so sure this is worth doing anything about (why would you care??) 
+ */ +static int sg_emulated_host(struct request_queue *q, int __user *p) +{ + return put_user(1, p); +} + +static int scsi_get_idlun(struct scsi_device *sdev, void __user *argp) +{ + struct scsi_idlun v = { + .dev_id = (sdev->id & 0xff) + + ((sdev->lun & 0xff) << 8) + + ((sdev->channel & 0xff) << 16) + + ((sdev->host->host_no & 0xff) << 24), + .host_unique_id = sdev->host->unique_id + }; + if (copy_to_user(argp, &v, sizeof(struct scsi_idlun))) + return -EFAULT; + return 0; +} + +static int scsi_send_start_stop(struct scsi_device *sdev, int data) +{ + u8 cdb[MAX_COMMAND_SIZE] = { }; + + cdb[0] = START_STOP; + cdb[4] = data; + return ioctl_internal_command(sdev, cdb, START_STOP_TIMEOUT, + NORMAL_RETRIES); +} + +/* + * Check if the given command is allowed. + * + * Only a subset of commands are allowed for unprivileged users. Commands used + * to format the media, update the firmware, etc. are not permitted. + */ +bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode) +{ + /* root can do any command. */ + if (capable(CAP_SYS_RAWIO)) + return true; + + /* Anybody who can open the device can do a read-safe command */ + switch (cmd[0]) { + /* Basic read-only commands */ + case TEST_UNIT_READY: + case REQUEST_SENSE: + case READ_6: + case READ_10: + case READ_12: + case READ_16: + case READ_BUFFER: + case READ_DEFECT_DATA: + case READ_CAPACITY: /* also GPCMD_READ_CDVD_CAPACITY */ + case READ_LONG: + case INQUIRY: + case MODE_SENSE: + case MODE_SENSE_10: + case LOG_SENSE: + case START_STOP: + case GPCMD_VERIFY_10: + case VERIFY_16: + case REPORT_LUNS: + case SERVICE_ACTION_IN_16: + case RECEIVE_DIAGNOSTIC: + case MAINTENANCE_IN: /* also GPCMD_SEND_KEY, which is a write command */ + case GPCMD_READ_BUFFER_CAPACITY: + /* Audio CD commands */ + case GPCMD_PLAY_CD: + case GPCMD_PLAY_AUDIO_10: + case GPCMD_PLAY_AUDIO_MSF: + case GPCMD_PLAY_AUDIO_TI: + case GPCMD_PAUSE_RESUME: + /* CD/DVD data reading */ + case GPCMD_READ_CD: + case GPCMD_READ_CD_MSF: + case GPCMD_READ_DISC_INFO: + case GPCMD_READ_DVD_STRUCTURE: + case GPCMD_READ_HEADER: + case GPCMD_READ_TRACK_RZONE_INFO: + case GPCMD_READ_SUBCHANNEL: + case GPCMD_READ_TOC_PMA_ATIP: + case GPCMD_REPORT_KEY: + case GPCMD_SCAN: + case GPCMD_GET_CONFIGURATION: + case GPCMD_READ_FORMAT_CAPACITIES: + case GPCMD_GET_EVENT_STATUS_NOTIFICATION: + case GPCMD_GET_PERFORMANCE: + case GPCMD_SEEK: + case GPCMD_STOP_PLAY_SCAN: + /* ZBC */ + case ZBC_IN: + return true; + /* Basic writing commands */ + case WRITE_6: + case WRITE_10: + case WRITE_VERIFY: + case WRITE_12: + case WRITE_VERIFY_12: + case WRITE_16: + case WRITE_LONG: + case WRITE_LONG_2: + case WRITE_SAME: + case WRITE_SAME_16: + case WRITE_SAME_32: + case ERASE: + case GPCMD_MODE_SELECT_10: + case MODE_SELECT: + case LOG_SELECT: + case GPCMD_BLANK: + case GPCMD_CLOSE_TRACK: + case GPCMD_FLUSH_CACHE: + case GPCMD_FORMAT_UNIT: + case GPCMD_REPAIR_RZONE_TRACK: + case GPCMD_RESERVE_RZONE_TRACK: + case GPCMD_SEND_DVD_STRUCTURE: + case GPCMD_SEND_EVENT: + case GPCMD_SEND_OPC: + case GPCMD_SEND_CUE_SHEET: + case GPCMD_SET_SPEED: + case GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL: + case GPCMD_LOAD_UNLOAD: + case GPCMD_SET_STREAMING: + case GPCMD_SET_READ_AHEAD: + /* ZBC */ + case ZBC_OUT: + return (mode & FMODE_WRITE); + default: + return false; + } +} +EXPORT_SYMBOL(scsi_cmd_allowed); + +static int scsi_fill_sghdr_rq(struct scsi_device *sdev, struct request *rq, + struct sg_io_hdr *hdr, fmode_t mode) +{ + struct scsi_request *req = scsi_req(rq); + + if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len)) + return 
-EFAULT; + if (!scsi_cmd_allowed(req->cmd, mode)) + return -EPERM; + + /* + * fill in request structure + */ + req->cmd_len = hdr->cmd_len; + + rq->timeout = msecs_to_jiffies(hdr->timeout); + if (!rq->timeout) + rq->timeout = sdev->sg_timeout; + if (!rq->timeout) + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + if (rq->timeout < BLK_MIN_SG_TIMEOUT) + rq->timeout = BLK_MIN_SG_TIMEOUT; + + return 0; +} + +static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, + struct bio *bio) +{ + struct scsi_request *req = scsi_req(rq); + int r, ret = 0; + + /* + * fill in all the output members + */ + hdr->status = req->result & 0xff; + hdr->masked_status = status_byte(req->result); + hdr->msg_status = COMMAND_COMPLETE; + hdr->host_status = host_byte(req->result); + hdr->driver_status = 0; + if (scsi_status_is_check_condition(hdr->status)) + hdr->driver_status = DRIVER_SENSE; + hdr->info = 0; + if (hdr->masked_status || hdr->host_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->resid = req->resid_len; + hdr->sb_len_wr = 0; + + if (req->sense_len && hdr->sbp) { + int len = min((unsigned int) hdr->mx_sb_len, req->sense_len); + + if (!copy_to_user(hdr->sbp, req->sense, len)) + hdr->sb_len_wr = len; + else + ret = -EFAULT; + } + + r = blk_rq_unmap_user(bio); + if (!ret) + ret = r; + + return ret; +} + +static int sg_io(struct scsi_device *sdev, struct gendisk *disk, + struct sg_io_hdr *hdr, fmode_t mode) +{ + unsigned long start_time; + ssize_t ret = 0; + int writing = 0; + int at_head = 0; + struct request *rq; + struct scsi_request *req; + struct bio *bio; + + if (hdr->interface_id != 'S') + return -EINVAL; + + if (hdr->dxfer_len > (queue_max_hw_sectors(sdev->request_queue) << 9)) + return -EIO; + + if (hdr->dxfer_len) + switch (hdr->dxfer_direction) { + default: + return -EINVAL; + case SG_DXFER_TO_DEV: + writing = 1; + break; + case SG_DXFER_TO_FROM_DEV: + case SG_DXFER_FROM_DEV: + break; + } + if (hdr->flags & SG_FLAG_Q_AT_HEAD) + at_head = 1; + + ret = -ENOMEM; + rq = blk_get_request(sdev->request_queue, writing ? 
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + req = scsi_req(rq); + + if (hdr->cmd_len > BLK_MAX_CDB) { + req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL); + if (!req->cmd) + goto out_put_request; + } + + ret = scsi_fill_sghdr_rq(sdev, rq, hdr, mode); + if (ret < 0) + goto out_free_cdb; + + ret = 0; + if (hdr->iovec_count) { + struct iov_iter i; + struct iovec *iov = NULL; + + ret = import_iovec(rq_data_dir(rq), hdr->dxferp, + hdr->iovec_count, 0, &iov, &i); + if (ret < 0) + goto out_free_cdb; + + /* SG_IO howto says that the shorter of the two wins */ + iov_iter_truncate(&i, hdr->dxfer_len); + + ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL); + kfree(iov); + } else if (hdr->dxfer_len) + ret = blk_rq_map_user(rq->q, rq, NULL, hdr->dxferp, + hdr->dxfer_len, GFP_KERNEL); + + if (ret) + goto out_free_cdb; + + bio = rq->bio; + req->retries = 0; + + start_time = jiffies; + + blk_execute_rq(disk, rq, at_head); + + hdr->duration = jiffies_to_msecs(jiffies - start_time); + + ret = scsi_complete_sghdr_rq(rq, hdr, bio); + +out_free_cdb: + scsi_req_free_cmd(req); +out_put_request: + blk_put_request(rq); + return ret; +} + +/** + * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl + * @q: request queue to send scsi commands down + * @disk: gendisk to operate on (option) + * @mode: mode used to open the file through which the ioctl has been + * submitted + * @sic: userspace structure describing the command to perform + * + * Send down the scsi command described by @sic to the device below + * the request queue @q. If @file is non-NULL it's used to perform + * fine-grained permission checks that allow users to send down + * non-destructive SCSI commands. If the caller has a struct gendisk + * available it should be passed in as @disk to allow the low level + * driver to use the information contained in it. A non-NULL @disk + * is only allowed if the caller knows that the low level driver doesn't + * need it (e.g. in the scsi subsystem). + * + * Notes: + * - This interface is deprecated - users should use the SG_IO + * interface instead, as this is a more flexible approach to + * performing SCSI commands on a device. + * - The SCSI command length is determined by examining the 1st byte + * of the given command. There is no way to override this. + * - Data transfers are limited to PAGE_SIZE + * - The length (x + y) must be at least OMAX_SB_LEN bytes long to + * accommodate the sense buffer when an error occurs. + * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that + * old code will not be surprised. + * - If a Unix error occurs (e.g. ENOMEM) then the user will receive + * a negative return and the Unix error code in 'errno'. + * If the SCSI command succeeds then 0 is returned. + * Positive numbers returned are the compacted SCSI error codes (4 + * bytes in one int) where the lowest byte is the SCSI status. 
+ */ +static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, + fmode_t mode, struct scsi_ioctl_command __user *sic) +{ + enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */ + struct request *rq; + struct scsi_request *req; + int err; + unsigned int in_len, out_len, bytes, opcode, cmdlen; + char *buffer = NULL; + + if (!sic) + return -EINVAL; + + /* + * get in an out lengths, verify they don't exceed a page worth of data + */ + if (get_user(in_len, &sic->inlen)) + return -EFAULT; + if (get_user(out_len, &sic->outlen)) + return -EFAULT; + if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) + return -EINVAL; + if (get_user(opcode, sic->data)) + return -EFAULT; + + bytes = max(in_len, out_len); + if (bytes) { + buffer = kzalloc(bytes, GFP_NOIO | GFP_USER | __GFP_NOWARN); + if (!buffer) + return -ENOMEM; + + } + + rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto error_free_buffer; + } + req = scsi_req(rq); + + cmdlen = COMMAND_SIZE(opcode); + + /* + * get command and data to send to device, if any + */ + err = -EFAULT; + req->cmd_len = cmdlen; + if (copy_from_user(req->cmd, sic->data, cmdlen)) + goto error; + + if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) + goto error; + + err = -EPERM; + if (!scsi_cmd_allowed(req->cmd, mode)) + goto error; + + /* default. possible overridden later */ + req->retries = 5; + + switch (opcode) { + case SEND_DIAGNOSTIC: + case FORMAT_UNIT: + rq->timeout = FORMAT_UNIT_TIMEOUT; + req->retries = 1; + break; + case START_STOP: + rq->timeout = START_STOP_TIMEOUT; + break; + case MOVE_MEDIUM: + rq->timeout = MOVE_MEDIUM_TIMEOUT; + break; + case READ_ELEMENT_STATUS: + rq->timeout = READ_ELEMENT_STATUS_TIMEOUT; + break; + case READ_DEFECT_DATA: + rq->timeout = READ_DEFECT_DATA_TIMEOUT; + req->retries = 1; + break; + default: + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + break; + } + + if (bytes) { + err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO); + if (err) + goto error; + } + + blk_execute_rq(disk, rq, 0); + + err = req->result & 0xff; /* only 8 bit SCSI status */ + if (err) { + if (req->sense_len && req->sense) { + bytes = (OMAX_SB_LEN > req->sense_len) ? 
+ req->sense_len : OMAX_SB_LEN; + if (copy_to_user(sic->data, req->sense, bytes)) + err = -EFAULT; + } + } else { + if (copy_to_user(sic->data, buffer, out_len)) + err = -EFAULT; + } + +error: + blk_put_request(rq); + +error_free_buffer: + kfree(buffer); + + return err; +} + +int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp) +{ +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + struct compat_sg_io_hdr hdr32 = { + .interface_id = hdr->interface_id, + .dxfer_direction = hdr->dxfer_direction, + .cmd_len = hdr->cmd_len, + .mx_sb_len = hdr->mx_sb_len, + .iovec_count = hdr->iovec_count, + .dxfer_len = hdr->dxfer_len, + .dxferp = (uintptr_t)hdr->dxferp, + .cmdp = (uintptr_t)hdr->cmdp, + .sbp = (uintptr_t)hdr->sbp, + .timeout = hdr->timeout, + .flags = hdr->flags, + .pack_id = hdr->pack_id, + .usr_ptr = (uintptr_t)hdr->usr_ptr, + .status = hdr->status, + .masked_status = hdr->masked_status, + .msg_status = hdr->msg_status, + .sb_len_wr = hdr->sb_len_wr, + .host_status = hdr->host_status, + .driver_status = hdr->driver_status, + .resid = hdr->resid, + .duration = hdr->duration, + .info = hdr->info, + }; + + if (copy_to_user(argp, &hdr32, sizeof(hdr32))) + return -EFAULT; + + return 0; + } +#endif + + if (copy_to_user(argp, hdr, sizeof(*hdr))) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(put_sg_io_hdr); + +int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp) +{ +#ifdef CONFIG_COMPAT + struct compat_sg_io_hdr hdr32; + + if (in_compat_syscall()) { + if (copy_from_user(&hdr32, argp, sizeof(hdr32))) + return -EFAULT; + + *hdr = (struct sg_io_hdr) { + .interface_id = hdr32.interface_id, + .dxfer_direction = hdr32.dxfer_direction, + .cmd_len = hdr32.cmd_len, + .mx_sb_len = hdr32.mx_sb_len, + .iovec_count = hdr32.iovec_count, + .dxfer_len = hdr32.dxfer_len, + .dxferp = compat_ptr(hdr32.dxferp), + .cmdp = compat_ptr(hdr32.cmdp), + .sbp = compat_ptr(hdr32.sbp), + .timeout = hdr32.timeout, + .flags = hdr32.flags, + .pack_id = hdr32.pack_id, + .usr_ptr = compat_ptr(hdr32.usr_ptr), + .status = hdr32.status, + .masked_status = hdr32.masked_status, + .msg_status = hdr32.msg_status, + .sb_len_wr = hdr32.sb_len_wr, + .host_status = hdr32.host_status, + .driver_status = hdr32.driver_status, + .resid = hdr32.resid, + .duration = hdr32.duration, + .info = hdr32.info, + }; + + return 0; + } +#endif + + if (copy_from_user(hdr, argp, sizeof(*hdr))) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(get_sg_io_hdr); + +#ifdef CONFIG_COMPAT +struct compat_cdrom_generic_command { + unsigned char cmd[CDROM_PACKET_SIZE]; + compat_caddr_t buffer; + compat_uint_t buflen; + compat_int_t stat; + compat_caddr_t sense; + unsigned char data_direction; + unsigned char pad[3]; + compat_int_t quiet; + compat_int_t timeout; + compat_caddr_t unused; +}; +#endif + +static int scsi_get_cdrom_generic_arg(struct cdrom_generic_command *cgc, + const void __user *arg) +{ +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + struct compat_cdrom_generic_command cgc32; + + if (copy_from_user(&cgc32, arg, sizeof(cgc32))) + return -EFAULT; + + *cgc = (struct cdrom_generic_command) { + .buffer = compat_ptr(cgc32.buffer), + .buflen = cgc32.buflen, + .stat = cgc32.stat, + .sense = compat_ptr(cgc32.sense), + .data_direction = cgc32.data_direction, + .quiet = cgc32.quiet, + .timeout = cgc32.timeout, + .unused = compat_ptr(cgc32.unused), + }; + memcpy(&cgc->cmd, &cgc32.cmd, CDROM_PACKET_SIZE); + return 0; + } +#endif + if (copy_from_user(cgc, arg, sizeof(*cgc))) + return -EFAULT; + + return 0; +} + +static int 
scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc, + void __user *arg) +{ +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) { + struct compat_cdrom_generic_command cgc32 = { + .buffer = (uintptr_t)(cgc->buffer), + .buflen = cgc->buflen, + .stat = cgc->stat, + .sense = (uintptr_t)(cgc->sense), + .data_direction = cgc->data_direction, + .quiet = cgc->quiet, + .timeout = cgc->timeout, + .unused = (uintptr_t)(cgc->unused), + }; + memcpy(&cgc32.cmd, &cgc->cmd, CDROM_PACKET_SIZE); + + if (copy_to_user(arg, &cgc32, sizeof(cgc32))) + return -EFAULT; + + return 0; + } +#endif + if (copy_to_user(arg, cgc, sizeof(*cgc))) + return -EFAULT; + + return 0; +} + +static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk, + fmode_t mode, void __user *arg) +{ + struct cdrom_generic_command cgc; + struct sg_io_hdr hdr; + int err; + + err = scsi_get_cdrom_generic_arg(&cgc, arg); + if (err) + return err; + + cgc.timeout = clock_t_to_jiffies(cgc.timeout); + memset(&hdr, 0, sizeof(hdr)); + hdr.interface_id = 'S'; + hdr.cmd_len = sizeof(cgc.cmd); + hdr.dxfer_len = cgc.buflen; + switch (cgc.data_direction) { + case CGC_DATA_UNKNOWN: + hdr.dxfer_direction = SG_DXFER_UNKNOWN; + break; + case CGC_DATA_WRITE: + hdr.dxfer_direction = SG_DXFER_TO_DEV; + break; + case CGC_DATA_READ: + hdr.dxfer_direction = SG_DXFER_FROM_DEV; + break; + case CGC_DATA_NONE: + hdr.dxfer_direction = SG_DXFER_NONE; + break; + default: + return -EINVAL; + } + + hdr.dxferp = cgc.buffer; + hdr.sbp = cgc.sense; + if (hdr.sbp) + hdr.mx_sb_len = sizeof(struct request_sense); + hdr.timeout = jiffies_to_msecs(cgc.timeout); + hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd; + hdr.cmd_len = sizeof(cgc.cmd); + + err = sg_io(sdev, disk, &hdr, mode); + if (err == -EFAULT) + return -EFAULT; + + if (hdr.status) + return -EIO; + + cgc.stat = err; + cgc.buflen = hdr.resid; + if (scsi_put_cdrom_generic_arg(&cgc, arg)) + return -EFAULT; + + return err; +} + +static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk, + fmode_t mode, void __user *argp) +{ + struct sg_io_hdr hdr; + int error; + + error = get_sg_io_hdr(&hdr, argp); + if (error) + return error; + error = sg_io(sdev, disk, &hdr, mode); + if (error == -EFAULT) + return error; + if (put_sg_io_hdr(&hdr, argp)) + return -EFAULT; + return 0; +} + +/** + * scsi_ioctl - Dispatch ioctl to scsi device + * @sdev: scsi device receiving ioctl + * @disk: disk receiving the ioctl + * @mode: mode the block/char device is opened with + * @cmd: which ioctl is it + * @arg: data associated with ioctl + * + * Description: The scsi_ioctl() function differs from most ioctls in that it + * does not take a major/minor number as the dev field. Rather, it takes + * a pointer to a &struct scsi_device. + */ +int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode, + int cmd, void __user *arg) +{ + struct request_queue *q = sdev->request_queue; struct scsi_sense_hdr sense_hdr; /* Check for deprecated ioctls ... 
all the ioctls which don't @@ -212,26 +912,34 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg } switch (cmd) { - case SCSI_IOCTL_GET_IDLUN: { - struct scsi_idlun v = { - .dev_id = (sdev->id & 0xff) - + ((sdev->lun & 0xff) << 8) - + ((sdev->channel & 0xff) << 16) - + ((sdev->host->host_no & 0xff) << 24), - .host_unique_id = sdev->host->unique_id - }; - if (copy_to_user(arg, &v, sizeof(struct scsi_idlun))) - return -EFAULT; - return 0; - } + case SG_GET_VERSION_NUM: + return sg_get_version(arg); + case SG_SET_TIMEOUT: + return sg_set_timeout(sdev, arg); + case SG_GET_TIMEOUT: + return jiffies_to_clock_t(sdev->sg_timeout); + case SG_GET_RESERVED_SIZE: + return sg_get_reserved_size(sdev, arg); + case SG_SET_RESERVED_SIZE: + return sg_set_reserved_size(sdev, arg); + case SG_EMULATED_HOST: + return sg_emulated_host(q, arg); + case SG_IO: + return scsi_ioctl_sg_io(sdev, disk, mode, arg); + case SCSI_IOCTL_SEND_COMMAND: + return sg_scsi_ioctl(q, disk, mode, arg); + case CDROM_SEND_PACKET: + return scsi_cdrom_send_packet(sdev, disk, mode, arg); + case CDROMCLOSETRAY: + return scsi_send_start_stop(sdev, 3); + case CDROMEJECT: + return scsi_send_start_stop(sdev, 2); + case SCSI_IOCTL_GET_IDLUN: + return scsi_get_idlun(sdev, arg); case SCSI_IOCTL_GET_BUS_NUMBER: return put_user(sdev->host->host_no, (int __user *)arg); case SCSI_IOCTL_PROBE_HOST: return ioctl_probe(sdev->host, arg); - case SCSI_IOCTL_SEND_COMMAND: - if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) - return -EACCES; - return sg_scsi_ioctl(sdev->request_queue, NULL, 0, arg); case SCSI_IOCTL_DOORLOCK: return scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); case SCSI_IOCTL_DOORUNLOCK: @@ -240,66 +948,27 @@ static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT, NORMAL_RETRIES, &sense_hdr); case SCSI_IOCTL_START_UNIT: - scsi_cmd[0] = START_STOP; - scsi_cmd[1] = 0; - scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; - scsi_cmd[4] = 1; - return ioctl_internal_command(sdev, scsi_cmd, - START_STOP_TIMEOUT, NORMAL_RETRIES); + return scsi_send_start_stop(sdev, 1); case SCSI_IOCTL_STOP_UNIT: - scsi_cmd[0] = START_STOP; - scsi_cmd[1] = 0; - scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[5] = 0; - scsi_cmd[4] = 0; - return ioctl_internal_command(sdev, scsi_cmd, - START_STOP_TIMEOUT, NORMAL_RETRIES); + return scsi_send_start_stop(sdev, 0); case SCSI_IOCTL_GET_PCI: return scsi_ioctl_get_pci(sdev, arg); case SG_SCSI_RESET: return scsi_ioctl_reset(sdev, arg); } - return -ENOIOCTLCMD; -} - -/** - * scsi_ioctl - Dispatch ioctl to scsi device - * @sdev: scsi device receiving ioctl - * @cmd: which ioctl is it - * @arg: data associated with ioctl - * - * Description: The scsi_ioctl() function differs from most ioctls in that it - * does not take a major/minor number as the dev field. Rather, it takes - * a pointer to a &struct scsi_device. 
- */ -int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) -{ - int ret = scsi_ioctl_common(sdev, cmd, arg); - - if (ret != -ENOIOCTLCMD) - return ret; - - if (sdev->host->hostt->ioctl) - return sdev->host->hostt->ioctl(sdev, cmd, arg); - - return -EINVAL; -} -EXPORT_SYMBOL(scsi_ioctl); #ifdef CONFIG_COMPAT -int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) -{ - int ret = scsi_ioctl_common(sdev, cmd, arg); - - if (ret != -ENOIOCTLCMD) - return ret; - - if (sdev->host->hostt->compat_ioctl) + if (in_compat_syscall()) { + if (!sdev->host->hostt->compat_ioctl) + return -EINVAL; return sdev->host->hostt->compat_ioctl(sdev, cmd, arg); - - return ret; -} -EXPORT_SYMBOL(scsi_compat_ioctl); + } #endif + if (!sdev->host->hostt->ioctl) + return -EINVAL; + return sdev->host->hostt->ioctl(sdev, cmd, arg); +} +EXPORT_SYMBOL(scsi_ioctl); /* * We can process a reset even when a device isn't fully operable. diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 7456a26aef51..9ba1aa7530a9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -119,13 +119,15 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason) static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd) { - if (cmd->request->rq_flags & RQF_DONTPREP) { - cmd->request->rq_flags &= ~RQF_DONTPREP; + struct request *rq = scsi_cmd_to_rq(cmd); + + if (rq->rq_flags & RQF_DONTPREP) { + rq->rq_flags &= ~RQF_DONTPREP; scsi_mq_uninit_cmd(cmd); } else { WARN_ON_ONCE(true); } - blk_mq_requeue_request(cmd->request, true); + blk_mq_requeue_request(rq, true); } /** @@ -164,7 +166,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) */ cmd->result = 0; - blk_mq_requeue_request(cmd->request, true); + blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true); } /** @@ -478,7 +480,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost) static void scsi_uninit_cmd(struct scsi_cmnd *cmd) { - if (!blk_rq_is_passthrough(cmd->request)) { + if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) { struct scsi_driver *drv = scsi_cmd_to_driver(cmd); if (drv->uninit_command) @@ -624,7 +626,7 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd, static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd) { - struct request *req = cmd->request; + struct request *req = scsi_cmd_to_rq(cmd); unsigned long wait_for; if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT) @@ -643,7 +645,7 @@ static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd) static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) { struct request_queue *q = cmd->device->request_queue; - struct request *req = cmd->request; + struct request *req = scsi_cmd_to_rq(cmd); int level = 0; enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, ACTION_DELAYED_RETRY} action; @@ -818,7 +820,7 @@ static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, { bool sense_valid; bool sense_current = true; /* false implies "deferred sense" */ - struct request *req = cmd->request; + struct request *req = scsi_cmd_to_rq(cmd); struct scsi_sense_hdr sshdr; sense_valid = scsi_command_normalize_sense(cmd, &sshdr); @@ -907,7 +909,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) { int result = cmd->result; struct request_queue *q = cmd->device->request_queue; - struct request *req = cmd->request; + struct request *req = scsi_cmd_to_rq(cmd); blk_status_t blk_stat = BLK_STS_OK; if (unlikely(result)) /* a nz result may or may not be an error */ @@ -978,7 +980,7 @@ static inline bool 
scsi_cmd_needs_dma_drain(struct scsi_device *sdev, blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); unsigned short nr_segs = blk_rq_nr_phys_segments(rq); struct scatterlist *last_sg = NULL; blk_status_t ret; @@ -1083,8 +1085,13 @@ EXPORT_SYMBOL(scsi_alloc_sgtables); static void scsi_initialize_rq(struct request *rq) { struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + struct scsi_request *req = &cmd->req; + + memset(req->__cmd, 0, sizeof(req->__cmd)); + req->cmd = req->__cmd; + req->cmd_len = BLK_MAX_CDB; + req->sense_len = 0; - scsi_req_init(&cmd->req); init_rcu_head(&cmd->rcu); cmd->jiffies_at_alloc = jiffies; cmd->retries = 0; @@ -1107,7 +1114,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) { void *buf = cmd->sense_buffer; void *prot = cmd->prot_sdb; - struct request *rq = blk_mq_rq_from_pdu(cmd); + struct request *rq = scsi_cmd_to_rq(cmd); unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS; unsigned long jiffies_at_alloc; int retries, to_clear; @@ -1533,7 +1540,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req) scsi_init_command(sdev, cmd); - cmd->request = req; cmd->tag = req->tag; cmd->prot_op = SCSI_PROT_NORMAL; if (blk_rq_bytes(req)) @@ -1572,12 +1578,12 @@ static blk_status_t scsi_prepare_cmd(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { - if (unlikely(blk_should_fake_timeout(cmd->request->q))) + if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q))) return; if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) return; trace_scsi_dispatch_cmd_done(cmd); - blk_mq_complete_request(cmd->request); + blk_mq_complete_request(scsi_cmd_to_rq(cmd)); } static void scsi_mq_put_budget(struct request_queue *q, int budget_token) diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c index 2317717935e9..ed9572252a42 100644 --- a/drivers/scsi/scsi_logging.c +++ b/drivers/scsi/scsi_logging.c @@ -28,8 +28,9 @@ static void scsi_log_release_buffer(char *bufptr) static inline const char *scmd_name(const struct scsi_cmnd *scmd) { - return scmd->request->rq_disk ? - scmd->request->rq_disk->disk_name : NULL; + struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd); + + return rq->rq_disk ? 
rq->rq_disk->disk_name : NULL; } static size_t sdev_format_header(char *logbuf, size_t logbuf_len, @@ -91,7 +92,7 @@ void scmd_printk(const char *level, const struct scsi_cmnd *scmd, if (!logbuf) return; off = sdev_format_header(logbuf, logbuf_len, scmd_name(scmd), - scmd->request->tag); + scsi_cmd_to_rq((struct scsi_cmnd *)scmd)->tag); if (off < logbuf_len) { va_start(args, fmt); off += vscnprintf(logbuf + off, logbuf_len - off, fmt, args); @@ -188,7 +189,7 @@ void scsi_print_command(struct scsi_cmnd *cmd) return; off = sdev_format_header(logbuf, logbuf_len, - scmd_name(cmd), cmd->request->tag); + scmd_name(cmd), scsi_cmd_to_rq(cmd)->tag); if (off >= logbuf_len) goto out_printk; off += scnprintf(logbuf + off, logbuf_len - off, "CDB: "); @@ -210,7 +211,7 @@ void scsi_print_command(struct scsi_cmnd *cmd) off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd), - cmd->request->tag); + scsi_cmd_to_rq(cmd)->tag); if (!WARN_ON(off > logbuf_len - 58)) { off += scnprintf(logbuf + off, logbuf_len - off, "CDB[%02x]: ", k); @@ -373,7 +374,8 @@ EXPORT_SYMBOL(__scsi_print_sense); /* Normalize and print sense buffer in SCSI command */ void scsi_print_sense(const struct scsi_cmnd *cmd) { - scsi_log_print_sense(cmd->device, scmd_name(cmd), cmd->request->tag, + scsi_log_print_sense(cmd->device, scmd_name(cmd), + scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); } EXPORT_SYMBOL(scsi_print_sense); @@ -391,8 +393,8 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg, if (!logbuf) return; - off = sdev_format_header(logbuf, logbuf_len, - scmd_name(cmd), cmd->request->tag); + off = sdev_format_header(logbuf, logbuf_len, scmd_name(cmd), + scsi_cmd_to_rq((struct scsi_cmnd *)cmd)->tag); if (off >= logbuf_len) goto out_printk; diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index eae2235f79b5..6d9152031a40 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -7,6 +7,7 @@ #include <scsi/scsi_device.h> #include <linux/sbitmap.h> +struct bsg_device; struct request_queue; struct request; struct scsi_cmnd; @@ -180,6 +181,8 @@ static inline void scsi_dh_add_device(struct scsi_device *sdev) { } static inline void scsi_dh_release_device(struct scsi_device *sdev) { } #endif +struct bsg_device *scsi_bsg_register_queue(struct scsi_device *sdev); + extern int scsi_device_max_queue_depth(struct scsi_device *sdev); /* diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 5b6996a2401b..fe22191522a3 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -267,6 +267,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, */ sdev->borken = 1; + sdev->sg_reserved_size = INT_MAX; + q = blk_mq_init_queue(&sdev->host->tag_set); if (IS_ERR(q)) { /* release fn is set up in scsi_sysfs_device_initialise, so @@ -974,6 +976,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, if (*bflags & BLIST_UNMAP_LIMIT_WS) sdev->unmap_limit_for_ws = 1; + if (*bflags & BLIST_IGN_MEDIA_CHANGE) + sdev->ignore_media_change = 1; + sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; if (*bflags & BLIST_TRY_VPD_PAGES) diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index ae9bfc658203..c3a710bceba0 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -13,6 +13,7 @@ #include <linux/blkdev.h> #include <linux/device.h> #include <linux/pm_runtime.h> +#include <linux/bsg.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -1330,7 +1331,6 @@ static 
int scsi_target_add(struct scsi_target *starget) int scsi_sysfs_add_sdev(struct scsi_device *sdev) { int error, i; - struct request_queue *rq = sdev->request_queue; struct scsi_target *starget = sdev->sdev_target; error = scsi_target_add(starget); @@ -1369,12 +1369,19 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) transport_add_device(&sdev->sdev_gendev); sdev->is_visible = 1; - error = bsg_scsi_register_queue(rq, &sdev->sdev_gendev); - if (error) - /* we're treating error on bsg register as non-fatal, - * so pretend nothing went wrong */ - sdev_printk(KERN_INFO, sdev, - "Failed to register bsg queue, errno=%d\n", error); + if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) { + sdev->bsg_dev = scsi_bsg_register_queue(sdev); + if (IS_ERR(sdev->bsg_dev)) { + /* + * We're treating error on bsg register as non-fatal, so + * pretend nothing went wrong. + */ + sdev_printk(KERN_INFO, sdev, + "Failed to register bsg queue, errno=%d\n", + error); + sdev->bsg_dev = NULL; + } + } /* add additional host specific attributes */ if (sdev->host->hostt->sdev_attrs) { @@ -1436,7 +1443,8 @@ void __scsi_remove_device(struct scsi_device *sdev) sysfs_remove_groups(&sdev->sdev_gendev.kobj, sdev->host->hostt->sdev_groups); - bsg_unregister_queue(sdev->request_queue); + if (IS_ENABLED(CONFIG_BLK_DEV_BSG) && sdev->bsg_dev) + bsg_unregister_queue(sdev->bsg_dev); device_unregister(&sdev->sdev_dev); transport_remove_device(dev); device_del(dev); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 49748cd817a5..60e406bcf42a 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3804,7 +3804,7 @@ bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd) struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device)); if ((rport->port_state != FC_PORTSTATE_ONLINE) && - (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT)) { + (scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) { set_host_byte(scmd, DID_TRANSPORT_MARGINAL); return false; } diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 5af7a10e9514..bd72c38d7bfc 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -1230,7 +1230,7 @@ int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) { if (cmd->flags & SCMD_TAGGED) { *msg++ = SIMPLE_QUEUE_TAG; - *msg++ = cmd->request->tag; + *msg++ = scsi_cmd_to_rq(cmd)->tag; return 2; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 6d2d63629a90..ac431b0477da 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -98,11 +98,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD); MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC); -#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT) #define SD_MINORS 16 -#else -#define SD_MINORS 0 -#endif static void sd_config_discard(struct scsi_disk *, unsigned int); static void sd_config_write_same(struct scsi_disk *); @@ -114,6 +110,7 @@ static void sd_shutdown(struct device *); static int sd_suspend_system(struct device *); static int sd_suspend_runtime(struct device *); static int sd_resume(struct device *); +static int sd_resume_runtime(struct device *); static void sd_rescan(struct device *); static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt); static void sd_uninit_command(struct scsi_cmnd *SCpnt); @@ -608,7 +605,7 @@ static const struct dev_pm_ops sd_pm_ops = { .poweroff = sd_suspend_system, .restore = sd_resume, .runtime_suspend = sd_suspend_runtime, - .runtime_resume = sd_resume, + .runtime_resume = sd_resume_runtime, 
}; static struct scsi_driver sd_template = { @@ -779,8 +776,9 @@ static unsigned int sd_prot_flag_mask(unsigned int prot_op) static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd, unsigned int dix, unsigned int dif) { - struct bio *bio = scmd->request->bio; - unsigned int prot_op = sd_prot_op(rq_data_dir(scmd->request), dix, dif); + struct request *rq = scsi_cmd_to_rq(scmd); + struct bio *bio = rq->bio; + unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif); unsigned int protect = 0; if (dix) { /* DIX Type 0, 1, 2, 3 */ @@ -871,7 +869,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) { struct scsi_device *sdp = cmd->device; - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); @@ -907,7 +905,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap) { struct scsi_device *sdp = cmd->device; - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); @@ -939,7 +937,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap) { struct scsi_device *sdp = cmd->device; - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); @@ -969,7 +967,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd) { - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_device *sdp = cmd->device; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq)); @@ -1066,7 +1064,7 @@ out: **/ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) { - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_device *sdp = cmd->device; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); struct bio *bio = rq->bio; @@ -1115,7 +1113,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) { - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); /* flush requests don't perform I/O, zero the S/G table */ @@ -1213,7 +1211,7 @@ static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) { - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_device *sdp = cmd->device; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); @@ -1327,7 +1325,7 @@ fail: static blk_status_t sd_init_command(struct scsi_cmnd *cmd) { - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); switch (req_op(rq)) { case REQ_OP_DISCARD: @@ -1373,7 +1371,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd) static void sd_uninit_command(struct scsi_cmnd *SCpnt) { - struct request *rq = 
SCpnt->request; + struct request *rq = scsi_cmd_to_rq(SCpnt); u8 *cmnd; if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) @@ -1533,11 +1531,11 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) } /** - * sd_ioctl_common - process an ioctl + * sd_ioctl - process an ioctl * @bdev: target block device * @mode: FMODE_* mask * @cmd: ioctl command number - * @p: this is third argument given to ioctl(2) system call. + * @arg: this is third argument given to ioctl(2) system call. * Often contains a pointer. * * Returns 0 if successful (some ioctls return positive numbers on @@ -1546,20 +1544,20 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) * Note: most ioctls are forward onto the block subsystem or further * down in the scsi subsystem. **/ -static int sd_ioctl_common(struct block_device *bdev, fmode_t mode, - unsigned int cmd, void __user *p) +static int sd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; + void __user *p = (void __user *)arg; int error; SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " "cmd=0x%x\n", disk->disk_name, cmd)); - error = scsi_verify_blk_ioctl(bdev, cmd); - if (error < 0) - return error; + if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO)) + return -ENOIOCTLCMD; /* * If we are in the middle of error recovery, don't let anyone @@ -1570,27 +1568,11 @@ static int sd_ioctl_common(struct block_device *bdev, fmode_t mode, error = scsi_ioctl_block_when_processing_errors(sdp, cmd, (mode & FMODE_NDELAY) != 0); if (error) - goto out; + return error; if (is_sed_ioctl(cmd)) return sed_ioctl(sdkp->opal_dev, cmd, p); - - /* - * Send SCSI addressing ioctls directly to mid level, send other - * ioctls to block level and then onto mid level if they can't be - * resolved. 
- */ - switch (cmd) { - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - error = scsi_ioctl(sdp, cmd, p); - break; - default: - error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p); - break; - } -out: - return error; + return scsi_ioctl(sdp, disk, mode, cmd, p); } static void set_media_not_present(struct scsi_disk *sdkp) @@ -1773,34 +1755,6 @@ static void sd_rescan(struct device *dev) sd_revalidate_disk(sdkp->disk); } -static int sd_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - void __user *p = (void __user *)arg; - int ret; - - ret = sd_ioctl_common(bdev, mode, cmd, p); - if (ret != -ENOTTY) - return ret; - - return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p); -} - -#ifdef CONFIG_COMPAT -static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - void __user *p = compat_ptr(arg); - int ret; - - ret = sd_ioctl_common(bdev, mode, cmd, p); - if (ret != -ENOTTY) - return ret; - - return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p); -} -#endif - static char sd_pr_type(enum pr_type type) { switch (type) { @@ -1901,9 +1855,7 @@ static const struct block_device_operations sd_fops = { .release = sd_release, .ioctl = sd_ioctl, .getgeo = sd_getgeo, -#ifdef CONFIG_COMPAT - .compat_ioctl = sd_compat_ioctl, -#endif + .compat_ioctl = blkdev_compat_ptr_ioctl, .check_events = sd_check_events, .unlock_native_capacity = sd_unlock_native_capacity, .report_zones = sd_zbc_report_zones, @@ -1924,7 +1876,7 @@ static const struct block_device_operations sd_fops = { **/ static void sd_eh_reset(struct scsi_cmnd *scmd) { - struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk); + struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk); /* New SCSI EH run, reset gate variable */ sdkp->ignore_medium_access_errors = false; @@ -1944,7 +1896,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd) **/ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) { - struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk); + struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk); struct scsi_device *sdev = scmd->device; if (!scsi_device_online(sdev) || @@ -1985,7 +1937,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) { - struct request *req = scmd->request; + struct request *req = scsi_cmd_to_rq(scmd); struct scsi_device *sdev = scmd->device; unsigned int transferred, good_bytes; u64 start_lba, end_lba, bad_lba; @@ -2040,8 +1992,8 @@ static int sd_done(struct scsi_cmnd *SCpnt) unsigned int sector_size = SCpnt->device->sector_size; unsigned int resid; struct scsi_sense_hdr sshdr; - struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); - struct request *req = SCpnt->request; + struct request *req = scsi_cmd_to_rq(SCpnt); + struct scsi_disk *sdkp = scsi_disk(req->rq_disk); int sense_valid = 0; int sense_deferred = 0; @@ -3720,6 +3672,25 @@ static int sd_resume(struct device *dev) return ret; } +static int sd_resume_runtime(struct device *dev) +{ + struct scsi_disk *sdkp = dev_get_drvdata(dev); + struct scsi_device *sdp = sdkp->device; + + if (sdp->ignore_media_change) { + /* clear the device's sense data */ + static const u8 cmd[10] = { REQUEST_SENSE }; + + if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, + NULL, sdp->request_queue->rq_timeout, 1, 0, + RQF_PM, NULL)) + sd_printk(KERN_NOTICE, sdkp, + "Failed to clear sense data\n"); + } + + return sd_resume(dev); +} + /** * init_sd - 
entry point for this driver (both when built in or when * a module). diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 186b5ff52c3a..b9757f24b0d6 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -243,7 +243,7 @@ out: static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd) { - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); sector_t sector = blk_rq_pos(rq); @@ -321,7 +321,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work) blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, unsigned int nr_blocks) { - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); unsigned int wp_offset, zno = blk_rq_zone_no(rq); unsigned long flags; @@ -386,7 +386,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, unsigned char op, bool all) { - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); sector_t sector = blk_rq_pos(rq); struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); sector_t block = sectors_to_logical(sdkp->device, sector); @@ -442,7 +442,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd, unsigned int good_bytes) { int result = cmd->result; - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); unsigned int zno = blk_rq_zone_no(rq); enum req_opf op = req_op(rq); @@ -516,7 +516,7 @@ unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr) { int result = cmd->result; - struct request *rq = cmd->request; + struct request *rq = scsi_cmd_to_rq(cmd); if (op_is_zone_mgmt(req_op(rq)) && result && diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 91e2221bbb0d..9be76deea242 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -238,8 +238,9 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd) if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; - - return blk_verify_command(cmd, filp->f_mode); + if (!scsi_cmd_allowed(cmd, filp->f_mode)) + return -EPERM; + return 0; } static int @@ -1108,7 +1109,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp, case SCSI_IOCTL_SEND_COMMAND: if (atomic_read(&sdp->detaching)) return -ENODEV; - return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p); + return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p); case SG_SET_DEBUG: result = get_user(val, ip); if (result) @@ -1165,28 +1166,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); if (ret != -ENOIOCTLCMD) return ret; - - return scsi_ioctl(sdp->device, cmd_in, p); -} - -#ifdef CONFIG_COMPAT -static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) -{ - void __user *p = compat_ptr(arg); - Sg_device *sdp; - Sg_fd *sfp; - int ret; - - if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) - return -ENXIO; - - ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); - if (ret != -ENOIOCTLCMD) - return ret; - - return scsi_compat_ioctl(sdp->device, cmd_in, p); + return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p); } -#endif static __poll_t sg_poll(struct file *filp, poll_table * wait) @@ -1441,9 +1422,7 @@ static const struct file_operations 
sg_fops = { .write = sg_write, .poll = sg_poll, .unlocked_ioctl = sg_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sg_compat_ioctl, -#endif + .compat_ioctl = compat_ptr_ioctl, .open = sg_open, .mmap = sg_mmap, .release = sg_release, diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig index cb9e4e968b60..6f83e2df4d64 100644 --- a/drivers/scsi/smartpqi/Kconfig +++ b/drivers/scsi/smartpqi/Kconfig @@ -1,7 +1,7 @@ # # Kernel configuration file for the SMARTPQI # -# Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries +# Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries # Copyright (c) 2017-2018 Microsemi Corporation # Copyright (c) 2016 Microsemi Corporation # Copyright (c) 2016 PMC-Sierra, Inc. @@ -38,14 +38,14 @@ # HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES config SCSI_SMARTPQI - tristate "Microsemi PQI Driver" + tristate "Microchip PQI Driver" depends on PCI && SCSI && !S390 select SCSI_SAS_ATTRS select RAID_ATTRS help - This driver supports Microsemi PQI controllers. + This driver supports Microchip PQI controllers. - <http://www.microsemi.com> + <http://www.microchip.com> To compile this driver as a module, choose M here: the module will be called smartpqi. diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index d7dac5572274..f340afc011b5 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * @@ -59,7 +59,7 @@ struct pqi_device_registers { /* * controller registers * - * These are defined by the Microsemi implementation. + * These are defined by the Microchip implementation. * * Some registers (those named sis_*) are only used when in * legacy SIS mode before we transition the controller into diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index dcc0b9618a64..d95498ff136a 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. 
* @@ -33,13 +33,13 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.8-045" +#define DRIVER_VERSION "2.1.10-020" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 8 -#define DRIVER_REVISION 45 +#define DRIVER_RELEASE 10 +#define DRIVER_REVISION 20 -#define DRIVER_NAME "Microsemi PQI Driver (v" \ +#define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" #define DRIVER_NAME_SHORT "smartpqi" @@ -48,8 +48,8 @@ #define PQI_POST_RESET_DELAY_SECS 5 #define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS 10 -MODULE_AUTHOR("Microsemi"); -MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version " +MODULE_AUTHOR("Microchip"); +MODULE_DESCRIPTION("Driver for Microchip Smart Family Controller version " DRIVER_VERSION); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); @@ -5568,7 +5568,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, { u16 hw_queue; - hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request)); + hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scsi_cmd_to_rq(scmd))); if (hw_queue > ctrl_info->max_hw_queue_index) hw_queue = 0; @@ -5577,7 +5577,7 @@ static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info, static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd) { - if (blk_rq_is_passthrough(scmd->request)) + if (blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) return false; return scmd->SCp.this_residual == 0; @@ -6033,8 +6033,10 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) mutex_lock(&ctrl_info->lun_reset_mutex); dev_err(&ctrl_info->pci_dev->dev, - "resetting scsi %d:%d:%d:%d\n", - shost->host_no, device->bus, device->target, device->lun); + "resetting scsi %d:%d:%d:%d due to cmd 0x%02x\n", + shost->host_no, + device->bus, device->target, device->lun, + scmd->cmd_len > 0 ? 
scmd->cmnd[0] : 0xff); pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) @@ -7758,11 +7760,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) pqi_init_operational_queues(ctrl_info); - rc = pqi_request_irqs(ctrl_info); + rc = pqi_create_queues(ctrl_info); if (rc) return rc; - rc = pqi_create_queues(ctrl_info); + rc = pqi_request_irqs(ctrl_info); if (rc) return rc; @@ -8451,7 +8453,7 @@ static void pqi_print_ctrl_info(struct pci_dev *pci_dev, if (id->driver_data) ctrl_description = (char *)id->driver_data; else - ctrl_description = "Microsemi Smart Family Controller"; + ctrl_description = "Microchip Smart Family Controller"; dev_info(&pci_dev->dev, "%s found\n", ctrl_description); } @@ -8713,6 +8715,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1108) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0x1109) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0x8460) }, { @@ -9173,6 +9183,34 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1dfc, 0x3161) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x5445) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x5446) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x5447) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0b27) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0b29) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0b45) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) }, { 0 } diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index dd628cc87f78..afd9bafebd1d 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index c954620628e0..d63c46a8e38b 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 12cd2ab1aead..d29c1352a826 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * driver for Microsemi PQI-based storage controllers - * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries + * driver for Microchip PQI-based storage controllers + * Copyright (c) 2019-2021 Microchip Technology Inc. 
and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c index 6dd0ff188bb4..95740caa1eb0 100644 --- a/drivers/scsi/snic/snic_scsi.c +++ b/drivers/scsi/snic/snic_scsi.c @@ -33,7 +33,7 @@ #include "snic_io.h" #include "snic.h" -#define snic_cmd_tag(sc) (((struct scsi_cmnd *) sc)->request->tag) +#define snic_cmd_tag(sc) (scsi_cmd_to_rq(sc)->tag) const char *snic_state_str[] = { [SNIC_INIT] = "SNIC_INIT", @@ -1636,7 +1636,7 @@ snic_abort_cmd(struct scsi_cmnd *sc) u32 start_time = jiffies; SNIC_SCSI_DBG(snic->shost, "abt_cmd:sc %p :0x%x :req = %p :tag = %d\n", - sc, sc->cmnd[0], sc->request, tag); + sc, sc->cmnd[0], scsi_cmd_to_rq(sc), tag); if (unlikely(snic_get_state(snic) != SNIC_ONLINE)) { SNIC_HOST_ERR(snic->shost, @@ -2152,7 +2152,7 @@ snic_device_reset(struct scsi_cmnd *sc) int dr_supp = 0; SNIC_SCSI_DBG(shost, "dev_reset:sc %p :0x%x :req = %p :tag = %d\n", - sc, sc->cmnd[0], sc->request, + sc, sc->cmnd[0], scsi_cmd_to_rq(sc), snic_cmd_tag(sc)); dr_supp = snic_dev_reset_supported(sc->device); if (!dr_supp) { @@ -2383,11 +2383,11 @@ snic_host_reset(struct scsi_cmnd *sc) { struct Scsi_Host *shost = sc->device->host; u32 start_time = jiffies; - int ret = FAILED; + int ret; SNIC_SCSI_DBG(shost, "host reset:sc %p sc_cmd 0x%x req %p tag %d flags 0x%llx\n", - sc, sc->cmnd[0], sc->request, + sc, sc->cmnd[0], scsi_cmd_to_rq(sc), snic_cmd_tag(sc), CMD_FLAGS(sc)); ret = snic_reset(shost, sc); @@ -2494,7 +2494,7 @@ cleanup: sc->result = DID_TRANSPORT_DISRUPTED << 16; SNIC_HOST_INFO(snic->shost, "sc_clean: DID_TRANSPORT_DISRUPTED for sc %p, Tag %d flags 0x%llx rqi %p duration %u msecs\n", - sc, sc->request->tag, CMD_FLAGS(sc), rqi, + sc, scsi_cmd_to_rq(sc)->tag, CMD_FLAGS(sc), rqi, jiffies_to_msecs(jiffies - st_time)); /* Update IO stats */ diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index a6d3ac0a6cbc..d65dbd189514 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -120,6 +120,8 @@ static void get_capabilities(struct scsi_cd *); static unsigned int sr_check_events(struct cdrom_device_info *cdi, unsigned int clearing, int slot); static int sr_packet(struct cdrom_device_info *, struct packet_command *); +static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf, + u32 lba, u32 nr, u8 *last_sense); static const struct cdrom_device_ops sr_dops = { .open = sr_open, @@ -133,8 +135,9 @@ static const struct cdrom_device_ops sr_dops = { .get_mcn = sr_get_mcn, .reset = sr_reset, .audio_ioctl = sr_audio_ioctl, - .capability = SR_CAPABILITIES, .generic_packet = sr_packet, + .read_cdda_bpc = sr_read_cdda_bpc, + .capability = SR_CAPABILITIES, }; static void sr_kref_release(struct kref *kref); @@ -328,7 +331,8 @@ static int sr_done(struct scsi_cmnd *SCpnt) int good_bytes = (result == 0 ? 
this_count : 0); int block_sectors = 0; long error_sector; - struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk); + struct request *rq = scsi_cmd_to_rq(SCpnt); + struct scsi_cd *cd = scsi_cd(rq->rq_disk); #ifdef DEBUG scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result); @@ -350,16 +354,14 @@ static int sr_done(struct scsi_cmnd *SCpnt) break; error_sector = get_unaligned_be32(&SCpnt->sense_buffer[3]); - if (SCpnt->request->bio != NULL) - block_sectors = - bio_sectors(SCpnt->request->bio); + if (rq->bio != NULL) + block_sectors = bio_sectors(rq->bio); if (block_sectors < 4) block_sectors = 4; if (cd->device->sector_size == 2048) error_sector <<= 2; error_sector &= ~(block_sectors - 1); - good_bytes = (error_sector - - blk_rq_pos(SCpnt->request)) << 9; + good_bytes = (error_sector - blk_rq_pos(rq)) << 9; if (good_bytes < 0 || good_bytes >= this_count) good_bytes = 0; /* @@ -391,7 +393,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt) { int block = 0, this_count, s_size; struct scsi_cd *cd; - struct request *rq = SCpnt->request; + struct request *rq = scsi_cmd_to_rq(SCpnt); blk_status_t ret; ret = scsi_alloc_sgtables(SCpnt); @@ -556,53 +558,14 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode) static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { - struct scsi_cd *cd = scsi_cd(bdev->bd_disk); + struct gendisk *disk = bdev->bd_disk; + struct scsi_cd *cd = scsi_cd(disk); struct scsi_device *sdev = cd->device; void __user *argp = (void __user *)arg; int ret; - mutex_lock(&cd->lock); - - ret = scsi_ioctl_block_when_processing_errors(sdev, cmd, - (mode & FMODE_NDELAY) != 0); - if (ret) - goto out; - - scsi_autopm_get_device(sdev); - - /* - * Send SCSI addressing ioctls directly to mid level, send other - * ioctls to cdrom/block level. - */ - switch (cmd) { - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - ret = scsi_ioctl(sdev, cmd, argp); - goto put; - } - - ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); - if (ret != -ENOSYS) - goto put; - - ret = scsi_ioctl(sdev, cmd, argp); - -put: - scsi_autopm_put_device(sdev); - -out: - mutex_unlock(&cd->lock); - return ret; -} - -#ifdef CONFIG_COMPAT -static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, - unsigned long arg) -{ - struct scsi_cd *cd = scsi_cd(bdev->bd_disk); - struct scsi_device *sdev = cd->device; - void __user *argp = compat_ptr(arg); - int ret; + if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO)) + return -ENOIOCTLCMD; mutex_lock(&cd->lock); @@ -613,32 +576,19 @@ static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsign scsi_autopm_get_device(sdev); - /* - * Send SCSI addressing ioctls directly to mid level, send other - * ioctls to cdrom/block level. 
- */ - switch (cmd) { - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - ret = scsi_compat_ioctl(sdev, cmd, argp); - goto put; + if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) { + ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); + if (ret != -ENOSYS) + goto put; } - - ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp); - if (ret != -ENOSYS) - goto put; - - ret = scsi_compat_ioctl(sdev, cmd, argp); + ret = scsi_ioctl(sdev, disk, mode, cmd, argp); put: scsi_autopm_put_device(sdev); - out: mutex_unlock(&cd->lock); return ret; - } -#endif static unsigned int sr_block_check_events(struct gendisk *disk, unsigned int clearing) @@ -663,9 +613,7 @@ static const struct block_device_operations sr_bdops = .open = sr_block_open, .release = sr_block_release, .ioctl = sr_block_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = sr_block_compat_ioctl, -#endif + .compat_ioctl = blkdev_compat_ptr_ioctl, .check_events = sr_block_check_events, }; @@ -1005,6 +953,57 @@ static int sr_packet(struct cdrom_device_info *cdi, return cgc->stat; } +static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf, + u32 lba, u32 nr, u8 *last_sense) +{ + struct gendisk *disk = cdi->disk; + u32 len = nr * CD_FRAMESIZE_RAW; + struct scsi_request *req; + struct request *rq; + struct bio *bio; + int ret; + + rq = blk_get_request(disk->queue, REQ_OP_DRV_IN, 0); + if (IS_ERR(rq)) + return PTR_ERR(rq); + req = scsi_req(rq); + + ret = blk_rq_map_user(disk->queue, rq, NULL, ubuf, len, GFP_KERNEL); + if (ret) + goto out_put_request; + + req->cmd[0] = GPCMD_READ_CD; + req->cmd[1] = 1 << 2; + req->cmd[2] = (lba >> 24) & 0xff; + req->cmd[3] = (lba >> 16) & 0xff; + req->cmd[4] = (lba >> 8) & 0xff; + req->cmd[5] = lba & 0xff; + req->cmd[6] = (nr >> 16) & 0xff; + req->cmd[7] = (nr >> 8) & 0xff; + req->cmd[8] = nr & 0xff; + req->cmd[9] = 0xf8; + req->cmd_len = 12; + rq->timeout = 60 * HZ; + bio = rq->bio; + + blk_execute_rq(disk, rq, 0); + if (scsi_req(rq)->result) { + struct scsi_sense_hdr sshdr; + + scsi_normalize_sense(req->sense, req->sense_len, + &sshdr); + *last_sense = sshdr.sense_key; + ret = -EIO; + } + + if (blk_rq_unmap_user(bio)) + ret = -EFAULT; +out_put_request: + blk_put_request(rq); + return ret; +} + + /** * sr_kref_release - Called to free the scsi_cd structure * @kref: pointer to embedded kref diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index c6f14540ae03..2d1b0594af69 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -3499,8 +3499,9 @@ out: /* The ioctl command */ -static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p) +static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) { + void __user *p = (void __user *)arg; int i, cmd_nr, cmd_type, bt; int retval = 0; unsigned int blk; @@ -3820,73 +3821,44 @@ static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user goto out; } mutex_unlock(&STp->lock); - switch (cmd_in) { - case SCSI_IOCTL_STOP_UNIT: - /* unload */ - retval = scsi_ioctl(STp->device, cmd_in, p); - if (!retval) { - STp->rew_at_close = 0; - STp->ready = ST_NO_TAPE; - } - return retval; - case SCSI_IOCTL_GET_IDLUN: - case SCSI_IOCTL_GET_BUS_NUMBER: - break; + switch (cmd_in) { + case SG_IO: + case SCSI_IOCTL_SEND_COMMAND: + case CDROM_SEND_PACKET: + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + default: + break; + } - default: - if ((cmd_in == SG_IO || - cmd_in == SCSI_IOCTL_SEND_COMMAND || - cmd_in == CDROM_SEND_PACKET) && - !capable(CAP_SYS_RAWIO)) - i = -EPERM; - else - i = 
scsi_cmd_ioctl(STp->disk->queue, STp->disk, - file->f_mode, cmd_in, p); - if (i != -ENOTTY) - return i; - break; + retval = scsi_ioctl(STp->device, STp->disk, file->f_mode, cmd_in, p); + if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { + /* unload */ + STp->rew_at_close = 0; + STp->ready = ST_NO_TAPE; } - return -ENOTTY; + return retval; out: mutex_unlock(&STp->lock); return retval; } -static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) -{ - void __user *p = (void __user *)arg; - struct scsi_tape *STp = file->private_data; - int ret; - - ret = st_ioctl_common(file, cmd_in, p); - if (ret != -ENOTTY) - return ret; - - return scsi_ioctl(STp->device, cmd_in, p); -} - #ifdef CONFIG_COMPAT static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) { - void __user *p = compat_ptr(arg); - struct scsi_tape *STp = file->private_data; - int ret; - /* argument conversion is handled using put_user_mtpos/put_user_mtget */ switch (cmd_in) { case MTIOCPOS32: - return st_ioctl_common(file, MTIOCPOS, p); + cmd_in = MTIOCPOS; + break; case MTIOCGET32: - return st_ioctl_common(file, MTIOCGET, p); + cmd_in = MTIOCGET; + break; } - ret = st_ioctl_common(file, cmd_in, p); - if (ret != -ENOTTY) - return ret; - - return scsi_compat_ioctl(STp->device, cmd_in, p); + return st_ioctl(file, cmd_in, arg); } #endif diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 491b435273a6..f1ba7f5b52a8 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -540,7 +540,7 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) msg_h = (struct st_msg_header *)req - 1; if (likely(cmd)) { msg_h->channel = (u8)cmd->device->channel; - msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ); + msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ); } addr = hba->dma_handle + hba->req_head * hba->rq_size; addr += (hba->ccb[tag].sg_count+4)/11; @@ -690,7 +690,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) cmd->scsi_done = done; - tag = cmd->request->tag; + tag = scsi_cmd_to_rq(cmd)->tag; if (unlikely(tag >= host->can_queue)) return SCSI_MLQUEUE_HOST_BUSY; @@ -1246,7 +1246,7 @@ static int stex_abort(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; struct st_hba *hba = (struct st_hba *)host->hostdata; - u16 tag = cmd->request->tag; + u16 tag = scsi_cmd_to_rq(cmd)->tag; void __iomem *base; u32 data; int result = SUCCESS; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 328bb961c281..e2278b0125e7 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -710,7 +710,7 @@ static u64 storvsc_next_request_id(struct vmbus_channel *channel, u64 rqst_addr) * Cannot return an ID of 0, which is reserved for an unsolicited * message from Hyper-V. 
*/ - return (u64)blk_mq_unique_tag(request->cmd->request) + 1; + return (u64)blk_mq_unique_tag(scsi_cmd_to_rq(request->cmd)) + 1; } static void handle_sc_creation(struct vmbus_channel *new_sc) @@ -1202,7 +1202,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) storvsc_log(device, STORVSC_LOGGING_ERROR, "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n", - request->cmd->request->tag, + scsi_cmd_to_rq(request->cmd)->tag, stor_pkt->vm_srb.cdb[0], vstor_packet->vm_srb.scsi_status, vstor_packet->vm_srb.srb_status, diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 2e3fbc2fae97..f7f724a3ff1d 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -336,7 +336,7 @@ static int sun3scsi_dma_xfer_len(struct NCR5380_hostdata *hostdata, { int wanted_len = cmd->SCp.this_residual; - if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(cmd->request)) + if (wanted_len < DMA_MIN_SIZE || blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) return 0; return wanted_len; @@ -366,8 +366,9 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) } /* clean up after our dma is done */ -static int sun3scsi_dma_finish(int write_flag) +static int sun3scsi_dma_finish(enum dma_data_direction data_dir) { + const bool write_flag = data_dir == DMA_TO_DEVICE; unsigned short __maybe_unused count; unsigned short fifo; int ret = 0; diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 16b65fc4405c..6d0b07b9cb31 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -500,8 +500,8 @@ static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd, * Shorten our settle_time if needed for * this command not to time out. */ - if (np->s.settle_time_valid && cmd->request->timeout) { - unsigned long tlimit = jiffies + cmd->request->timeout; + if (np->s.settle_time_valid && scsi_cmd_to_rq(cmd)->timeout) { + unsigned long tlimit = jiffies + scsi_cmd_to_rq(cmd)->timeout; tlimit -= SYM_CONF_TIMER_INTERVAL*2; if (time_after(np->s.settle_time, tlimit)) { np->s.settle_time = tlimit; diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 2d137953e7b4..432df76e6318 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -183,3 +183,19 @@ config SCSI_UFS_CRYPTO Enabling this makes it possible for the kernel to use the crypto capabilities of the UFS device (if present) to perform crypto operations on data being transferred to/from the device. + +config SCSI_UFS_HPB + bool "Support UFS Host Performance Booster" + depends on SCSI_UFSHCD + help + The UFS HPB feature improves random read performance. It caches + L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB + read command by piggybacking physical page number for bypassing FTL (flash + translation layer)'s L2P address translation. + +config SCSI_UFS_FAULT_INJECTION + bool "UFS Fault Injection Support" + depends on SCSI_UFSHCD && FAULT_INJECTION + help + Enable fault injection support in the UFS driver. This makes it easier + to test the UFS error handler and abort handler. 
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 06f3a3fe4a44..c407da9b5171 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -8,6 +8,8 @@ ufshcd-core-y += ufshcd.o ufs-sysfs.o ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o +ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o +ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c index 908ff39c4856..7da8be2f35c4 100644 --- a/drivers/scsi/ufs/cdns-pltfrm.c +++ b/drivers/scsi/ufs/cdns-pltfrm.c @@ -318,11 +318,8 @@ static int cdns_ufs_pltfrm_remove(struct platform_device *pdev) } static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { - .suspend = ufshcd_pltfrm_suspend, - .resume = ufshcd_pltfrm_resume, - .runtime_suspend = ufshcd_pltfrm_runtime_suspend, - .runtime_resume = ufshcd_pltfrm_runtime_resume, - .runtime_idle = ufshcd_pltfrm_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) .prepare = ufshcd_suspend_prepare, .complete = ufshcd_resume_complete, }; diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c index ec4589afbc13..679289e1a78e 100644 --- a/drivers/scsi/ufs/tc-dwc-g210-pci.c +++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c @@ -23,31 +23,6 @@ static int tc_type = TC_G210_INV; module_param(tc_type, int, 0); MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)"); -static int tc_dwc_g210_pci_suspend(struct device *dev) -{ - return ufshcd_system_suspend(dev_get_drvdata(dev)); -} - -static int tc_dwc_g210_pci_resume(struct device *dev) -{ - return ufshcd_system_resume(dev_get_drvdata(dev)); -} - -static int tc_dwc_g210_pci_runtime_suspend(struct device *dev) -{ - return ufshcd_runtime_suspend(dev_get_drvdata(dev)); -} - -static int tc_dwc_g210_pci_runtime_resume(struct device *dev) -{ - return ufshcd_runtime_resume(dev_get_drvdata(dev)); -} - -static int tc_dwc_g210_pci_runtime_idle(struct device *dev) -{ - return ufshcd_runtime_idle(dev_get_drvdata(dev)); -} - /* * struct ufs_hba_dwc_vops - UFS DWC specific variant operations */ @@ -143,11 +118,8 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) } static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = { - .suspend = tc_dwc_g210_pci_suspend, - .resume = tc_dwc_g210_pci_resume, - .runtime_suspend = tc_dwc_g210_pci_runtime_suspend, - .runtime_resume = tc_dwc_g210_pci_runtime_resume, - .runtime_idle = tc_dwc_g210_pci_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) .prepare = ufshcd_suspend_prepare, .complete = ufshcd_resume_complete, }; diff --git a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c index a1268e4f44d6..783ec43efa78 100644 --- a/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c +++ b/drivers/scsi/ufs/tc-dwc-g210-pltfrm.c @@ -84,11 +84,8 @@ static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev) } static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = { - .suspend = ufshcd_pltfrm_suspend, - .resume = ufshcd_pltfrm_resume, - .runtime_suspend = 
ufshcd_pltfrm_runtime_suspend, - .runtime_resume = ufshcd_pltfrm_runtime_resume, - .runtime_idle = ufshcd_pltfrm_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) }; static struct platform_driver tc_dwc_g210_pltfm_driver = { diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c index cf46d6f86e0e..5aa096e5b6cc 100644 --- a/drivers/scsi/ufs/ufs-exynos.c +++ b/drivers/scsi/ufs/ufs-exynos.c @@ -1287,11 +1287,8 @@ static const struct of_device_id exynos_ufs_of_match[] = { }; static const struct dev_pm_ops exynos_ufs_pm_ops = { - .suspend = ufshcd_pltfrm_suspend, - .resume = ufshcd_pltfrm_resume, - .runtime_suspend = ufshcd_pltfrm_runtime_suspend, - .runtime_resume = ufshcd_pltfrm_runtime_resume, - .runtime_idle = ufshcd_pltfrm_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) .prepare = ufshcd_suspend_prepare, .complete = ufshcd_resume_complete, }; diff --git a/drivers/scsi/ufs/ufs-fault-injection.c b/drivers/scsi/ufs/ufs-fault-injection.c new file mode 100644 index 000000000000..7ac7c4e7ff83 --- /dev/null +++ b/drivers/scsi/ufs/ufs-fault-injection.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/kconfig.h> +#include <linux/types.h> +#include <linux/fault-inject.h> +#include <linux/module.h> +#include "ufs-fault-injection.h" + +static int ufs_fault_get(char *buffer, const struct kernel_param *kp); +static int ufs_fault_set(const char *val, const struct kernel_param *kp); + +static const struct kernel_param_ops ufs_fault_ops = { + .get = ufs_fault_get, + .set = ufs_fault_set, +}; + +enum { FAULT_INJ_STR_SIZE = 80 }; + +/* + * For more details about fault injection, please refer to + * Documentation/fault-injection/fault-injection.rst. + */ +static char g_trigger_eh_str[FAULT_INJ_STR_SIZE]; +module_param_cb(trigger_eh, &ufs_fault_ops, g_trigger_eh_str, 0644); +MODULE_PARM_DESC(trigger_eh, + "Fault injection. trigger_eh=<interval>,<probability>,<space>,<times>"); +static DECLARE_FAULT_ATTR(ufs_trigger_eh_attr); + +static char g_timeout_str[FAULT_INJ_STR_SIZE]; +module_param_cb(timeout, &ufs_fault_ops, g_timeout_str, 0644); +MODULE_PARM_DESC(timeout, + "Fault injection. 
timeout=<interval>,<probability>,<space>,<times>"); +static DECLARE_FAULT_ATTR(ufs_timeout_attr); + +static int ufs_fault_get(char *buffer, const struct kernel_param *kp) +{ + const char *fault_str = kp->arg; + + return sysfs_emit(buffer, "%s\n", fault_str); +} + +static int ufs_fault_set(const char *val, const struct kernel_param *kp) +{ + struct fault_attr *attr = NULL; + + if (kp->arg == g_trigger_eh_str) + attr = &ufs_trigger_eh_attr; + else if (kp->arg == g_timeout_str) + attr = &ufs_timeout_attr; + + if (WARN_ON_ONCE(!attr)) + return -EINVAL; + + if (!setup_fault_attr(attr, (char *)val)) + return -EINVAL; + + strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE); + + return 0; +} + +bool ufs_trigger_eh(void) +{ + return should_fail(&ufs_trigger_eh_attr, 1); +} + +bool ufs_fail_completion(void) +{ + return should_fail(&ufs_timeout_attr, 1); +} diff --git a/drivers/scsi/ufs/ufs-fault-injection.h b/drivers/scsi/ufs/ufs-fault-injection.h new file mode 100644 index 000000000000..6d0cd8e10c87 --- /dev/null +++ b/drivers/scsi/ufs/ufs-fault-injection.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _UFS_FAULT_INJECTION_H +#define _UFS_FAULT_INJECTION_H + +#include <linux/kconfig.h> +#include <linux/types.h> + +#ifdef CONFIG_SCSI_UFS_FAULT_INJECTION +bool ufs_trigger_eh(void); +bool ufs_fail_completion(void); +#else +static inline bool ufs_trigger_eh(void) +{ + return false; +} + +static inline bool ufs_fail_completion(void) +{ + return false; +} +#endif + +#endif /* _UFS_FAULT_INJECTION_H */ diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index 5b147a48161b..6b706de8354b 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -572,11 +572,8 @@ static int ufs_hisi_remove(struct platform_device *pdev) } static const struct dev_pm_ops ufs_hisi_pm_ops = { - .suspend = ufshcd_pltfrm_suspend, - .resume = ufshcd_pltfrm_resume, - .runtime_suspend = ufshcd_pltfrm_runtime_suspend, - .runtime_resume = ufshcd_pltfrm_runtime_resume, - .runtime_idle = ufshcd_pltfrm_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) .prepare = ufshcd_suspend_prepare, .complete = ufshcd_resume_complete, }; diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index d2c251628a05..80b3545dd17d 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -1140,11 +1140,8 @@ static int ufs_mtk_remove(struct platform_device *pdev) } static const struct dev_pm_ops ufs_mtk_pm_ops = { - .suspend = ufshcd_pltfrm_suspend, - .resume = ufshcd_pltfrm_resume, - .runtime_suspend = ufshcd_pltfrm_runtime_suspend, - .runtime_resume = ufshcd_pltfrm_runtime_resume, - .runtime_idle = ufshcd_pltfrm_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) .prepare = ufshcd_suspend_prepare, .complete = ufshcd_resume_complete, }; diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 9b1d18d7c9bb..9d9770f1db4f 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -1546,11 +1546,8 @@ MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match); #endif static const struct dev_pm_ops ufs_qcom_pm_ops = { - .suspend = ufshcd_pltfrm_suspend, - .resume = ufshcd_pltfrm_resume, - .runtime_suspend = ufshcd_pltfrm_runtime_suspend, - .runtime_resume = ufshcd_pltfrm_runtime_resume, - .runtime_idle = 
ufshcd_pltfrm_runtime_idle, + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) .prepare = ufshcd_suspend_prepare, .complete = ufshcd_resume_complete, }; diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index 52bd807f7940..5c405ff7b6ea 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -604,6 +604,8 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2); UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1); UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4); UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1); +UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2); +UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1); UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4); UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1); UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1); @@ -636,6 +638,8 @@ static struct attribute *ufs_sysfs_device_descriptor[] = { &dev_attr_number_of_secure_wpa.attr, &dev_attr_psa_max_data_size.attr, &dev_attr_psa_state_timeout.attr, + &dev_attr_hpb_version.attr, + &dev_attr_hpb_control.attr, &dev_attr_ext_feature_sup.attr, &dev_attr_wb_presv_us_en.attr, &dev_attr_wb_type.attr, @@ -709,6 +713,10 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units, _ENM4_MAX_NUM_UNITS, 4); UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor, _ENM4_CAP_ADJ_FCTR, 2); +UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1); +UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2); UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4); UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1); UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1); @@ -746,6 +754,10 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = { &dev_attr_enh3_memory_capacity_adjustment_factor.attr, &dev_attr_enh4_memory_max_alloc_units.attr, &dev_attr_enh4_memory_capacity_adjustment_factor.attr, + &dev_attr_hpb_region_size.attr, + &dev_attr_hpb_number_lu.attr, + &dev_attr_hpb_subregion_size.attr, + &dev_attr_hpb_max_active_regions.attr, &dev_attr_wb_max_alloc_units.attr, &dev_attr_wb_max_wb_luns.attr, &dev_attr_wb_buff_cap_adj.attr, @@ -1006,6 +1018,7 @@ UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE); UFS_FLAG(wb_enable, _WB_EN); UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN); UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8); +UFS_FLAG(hpb_enable, _HPB_EN); static struct attribute *ufs_sysfs_device_flags[] = { &dev_attr_device_init.attr, @@ -1019,6 +1032,7 @@ static struct attribute *ufs_sysfs_device_flags[] = { &dev_attr_wb_enable.attr, &dev_attr_wb_flush_en.attr, &dev_attr_wb_flush_during_h8.attr, + &dev_attr_hpb_enable.attr, NULL, }; @@ -1065,6 +1079,7 @@ out: \ static DEVICE_ATTR_RO(_name) UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN); +UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD); UFS_ATTRIBUTE(current_power_mode, _POWER_MODE); UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL); UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN); @@ -1088,6 +1103,7 @@ UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE); static struct attribute *ufs_sysfs_attributes[] = { &dev_attr_boot_lun_enabled.attr, + &dev_attr_max_data_size_hpb_single_cmd.attr, &dev_attr_current_power_mode.attr, &dev_attr_active_icc_level.attr, 
&dev_attr_ooo_data_enabled.attr, @@ -1147,6 +1163,7 @@ static DEVICE_ATTR_RO(_pname) #define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \ UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size) +UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1); UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1); UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1); UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1); @@ -1160,10 +1177,13 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); +UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2); +UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2); +UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2); UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); - static struct attribute *ufs_sysfs_unit_descriptor[] = { + &dev_attr_lu_enable.attr, &dev_attr_boot_lun_id.attr, &dev_attr_lun_write_protect.attr, &dev_attr_lun_queue_depth.attr, @@ -1177,6 +1197,9 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { &dev_attr_physical_memory_resourse_count.attr, &dev_attr_context_capabilities.attr, &dev_attr_large_unit_granularity.attr, + &dev_attr_hpb_lu_max_active_regions.attr, + &dev_attr_hpb_pinned_region_start_offset.attr, + &dev_attr_hpb_number_pinned_regions.attr, &dev_attr_wb_buf_alloc_units.attr, NULL, }; diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index cb80b9670bfe..8c6b38b1b142 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -122,12 +122,14 @@ enum flag_idn { QUERY_FLAG_IDN_WB_EN = 0x0E, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN = 0x0F, QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8 = 0x10, + QUERY_FLAG_IDN_HPB_RESET = 0x11, + QUERY_FLAG_IDN_HPB_EN = 0x12, }; /* Attribute idn for Query requests */ enum attr_idn { QUERY_ATTR_IDN_BOOT_LU_EN = 0x00, - QUERY_ATTR_IDN_RESERVED = 0x01, + QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD = 0x01, QUERY_ATTR_IDN_POWER_MODE = 0x02, QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03, QUERY_ATTR_IDN_OOO_DATA_EN = 0x04, @@ -195,6 +197,9 @@ enum unit_desc_param { UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20, UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS = 0x23, + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF = 0x25, + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS = 0x27, UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29, }; @@ -235,6 +240,8 @@ enum device_desc_param { DEVICE_DESC_PARAM_PSA_MAX_DATA = 0x25, DEVICE_DESC_PARAM_PSA_TMT = 0x29, DEVICE_DESC_PARAM_PRDCT_REV = 0x2A, + DEVICE_DESC_PARAM_HPB_VER = 0x40, + DEVICE_DESC_PARAM_HPB_CONTROL = 0x42, DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP = 0x4F, DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN = 0x53, DEVICE_DESC_PARAM_WB_TYPE = 0x54, @@ -283,6 +290,10 @@ enum geometry_desc_param { GEOMETRY_DESC_PARAM_ENM4_MAX_NUM_UNITS = 0x3E, GEOMETRY_DESC_PARAM_ENM4_CAP_ADJ_FCTR = 0x42, GEOMETRY_DESC_PARAM_OPT_LOG_BLK_SIZE = 0x44, + GEOMETRY_DESC_PARAM_HPB_REGION_SIZE = 0x48, + GEOMETRY_DESC_PARAM_HPB_NUMBER_LU = 0x49, + GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE = 0x4A, + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS = 0x4B, GEOMETRY_DESC_PARAM_WB_MAX_ALLOC_UNITS = 0x4F, GEOMETRY_DESC_PARAM_WB_MAX_WB_LUNS = 0x53, GEOMETRY_DESC_PARAM_WB_BUFF_CAP_ADJ = 0x54, @@ -327,8 +338,10 @@ enum { /* Possible values for dExtendedUFSFeaturesSupport */ enum { + UFS_DEV_HPB_SUPPORT = BIT(7), 
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), }; +#define UFS_DEV_HPB_SUPPORT_VERSION 0x310 #define POWER_DESC_MAX_ACTV_ICC_LVLS 16 @@ -466,6 +479,41 @@ struct utp_cmd_rsp { u8 sense_data[UFS_SENSE_SIZE]; }; +struct ufshpb_active_field { + __be16 active_rgn; + __be16 active_srgn; +}; +#define HPB_ACT_FIELD_SIZE 4 + +/** + * struct utp_hpb_rsp - Response UPIU structure + * @residual_transfer_count: Residual transfer count DW-3 + * @reserved1: Reserved double words DW-4 to DW-7 + * @sense_data_len: Sense data length DW-8 U16 + * @desc_type: Descriptor type of sense data + * @additional_len: Additional length of sense data + * @hpb_op: HPB operation type + * @lun: LUN of response UPIU + * @active_rgn_cnt: Active region count + * @inactive_rgn_cnt: Inactive region count + * @hpb_active_field: Recommended to read HPB region and subregion + * @hpb_inactive_field: To be inactivated HPB region and subregion + */ +struct utp_hpb_rsp { + __be32 residual_transfer_count; + __be32 reserved1[4]; + __be16 sense_data_len; + u8 desc_type; + u8 additional_len; + u8 hpb_op; + u8 lun; + u8 active_rgn_cnt; + u8 inactive_rgn_cnt; + struct ufshpb_active_field hpb_active_field[2]; + __be16 hpb_inactive_field[2]; +}; +#define UTP_HPB_RSP_SIZE 40 + /** * struct utp_upiu_rsp - general upiu response structure * @header: UPIU header structure DW-0 to DW-2 @@ -476,6 +524,7 @@ struct utp_upiu_rsp { struct utp_upiu_header header; union { struct utp_cmd_rsp sr; + struct utp_hpb_rsp hr; struct utp_upiu_query qr; }; }; @@ -544,6 +593,9 @@ struct ufs_dev_info { u16 wspecversion; u32 clk_gating_wait_us; + /* UFS HPB related flag */ + bool hpb_enabled; + /* UFS WB related flags */ bool wb_enabled; bool wb_buf_flush_enabled; diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index 07f559ac5883..35ec9ea79869 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -116,4 +116,10 @@ struct ufs_dev_fix { */ #define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11) +/* + * Some UFS devices require L2P entry should be swapped before being sent to the + * UFS device for HPB READ command. 
+ */ +#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12) + #endif /* UFS_QUIRKS_H_ */ diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index e6c334bfb4c2..b3bcc5c882da 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -385,48 +385,6 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = { .device_reset = ufs_intel_device_reset, }; -#ifdef CONFIG_PM_SLEEP -/** - * ufshcd_pci_suspend - suspend power management function - * @dev: pointer to PCI device handle - * - * Returns 0 if successful - * Returns non-zero otherwise - */ -static int ufshcd_pci_suspend(struct device *dev) -{ - return ufshcd_system_suspend(dev_get_drvdata(dev)); -} - -/** - * ufshcd_pci_resume - resume power management function - * @dev: pointer to PCI device handle - * - * Returns 0 if successful - * Returns non-zero otherwise - */ -static int ufshcd_pci_resume(struct device *dev) -{ - return ufshcd_system_resume(dev_get_drvdata(dev)); -} - -#endif /* !CONFIG_PM_SLEEP */ - -#ifdef CONFIG_PM -static int ufshcd_pci_runtime_suspend(struct device *dev) -{ - return ufshcd_runtime_suspend(dev_get_drvdata(dev)); -} -static int ufshcd_pci_runtime_resume(struct device *dev) -{ - return ufshcd_runtime_resume(dev_get_drvdata(dev)); -} -static int ufshcd_pci_runtime_idle(struct device *dev) -{ - return ufshcd_runtime_idle(dev_get_drvdata(dev)); -} -#endif /* !CONFIG_PM */ - /** * ufshcd_pci_shutdown - main function to put the controller in reset state * @pdev: pointer to PCI device handle @@ -510,10 +468,8 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) } static const struct dev_pm_ops ufshcd_pci_pm_ops = { - SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend, - ufshcd_pci_runtime_resume, - ufshcd_pci_runtime_idle) - SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend, ufshcd_pci_resume) + SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume) + SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL) #ifdef CONFIG_PM_SLEEP .prepare = ufshcd_suspend_prepare, .complete = ufshcd_resume_complete, diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index 298e22ef907e..8859c13f4e09 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -170,53 +170,6 @@ out: return err; } -#ifdef CONFIG_PM -/** - * ufshcd_pltfrm_suspend - suspend power management function - * @dev: pointer to device handle - * - * Returns 0 if successful - * Returns non-zero otherwise - */ -int ufshcd_pltfrm_suspend(struct device *dev) -{ - return ufshcd_system_suspend(dev_get_drvdata(dev)); -} -EXPORT_SYMBOL_GPL(ufshcd_pltfrm_suspend); - -/** - * ufshcd_pltfrm_resume - resume power management function - * @dev: pointer to device handle - * - * Returns 0 if successful - * Returns non-zero otherwise - */ -int ufshcd_pltfrm_resume(struct device *dev) -{ - return ufshcd_system_resume(dev_get_drvdata(dev)); -} -EXPORT_SYMBOL_GPL(ufshcd_pltfrm_resume); - -int ufshcd_pltfrm_runtime_suspend(struct device *dev) -{ - return ufshcd_runtime_suspend(dev_get_drvdata(dev)); -} -EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_suspend); - -int ufshcd_pltfrm_runtime_resume(struct device *dev) -{ - return ufshcd_runtime_resume(dev_get_drvdata(dev)); -} -EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_resume); - -int ufshcd_pltfrm_runtime_idle(struct device *dev) -{ - return ufshcd_runtime_idle(dev_get_drvdata(dev)); -} -EXPORT_SYMBOL_GPL(ufshcd_pltfrm_runtime_idle); - -#endif /* CONFIG_PM */ - void ufshcd_pltfrm_shutdown(struct 
platform_device *pdev) { ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev)); diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h index 772a8e848098..c33e28ac6ef6 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.h +++ b/drivers/scsi/ufs/ufshcd-pltfrm.h @@ -33,22 +33,4 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, const struct ufs_hba_variant_ops *vops); void ufshcd_pltfrm_shutdown(struct platform_device *pdev); -#ifdef CONFIG_PM - -int ufshcd_pltfrm_suspend(struct device *dev); -int ufshcd_pltfrm_resume(struct device *dev); -int ufshcd_pltfrm_runtime_suspend(struct device *dev); -int ufshcd_pltfrm_runtime_resume(struct device *dev); -int ufshcd_pltfrm_runtime_idle(struct device *dev); - -#else /* !CONFIG_PM */ - -#define ufshcd_pltfrm_suspend NULL -#define ufshcd_pltfrm_resume NULL -#define ufshcd_pltfrm_runtime_suspend NULL -#define ufshcd_pltfrm_runtime_resume NULL -#define ufshcd_pltfrm_runtime_idle NULL - -#endif /* CONFIG_PM */ - #endif /* UFSHCD_PLTFRM_H_ */ diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 708b3b62fc4d..a3b419848f0a 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -17,15 +17,18 @@ #include <linux/blk-pm.h> #include <linux/blkdev.h> #include <scsi/scsi_driver.h> +#include <scsi/scsi_transport.h> +#include "../scsi_transport_api.h" #include "ufshcd.h" #include "ufs_quirks.h" #include "unipro.h" #include "ufs-sysfs.h" #include "ufs-debugfs.h" +#include "ufs-fault-injection.h" #include "ufs_bsg.h" #include "ufshcd-crypto.h" +#include "ufshpb.h" #include <asm/unaligned.h> -#include "../sd.h" #define CREATE_TRACE_POINTS #include <trace/events/ufs.h> @@ -128,15 +131,6 @@ enum { UFSHCD_CAN_QUEUE = 32, }; -/* UFSHCD states */ -enum { - UFSHCD_STATE_RESET, - UFSHCD_STATE_ERROR, - UFSHCD_STATE_OPERATIONAL, - UFSHCD_STATE_EH_SCHEDULED_FATAL, - UFSHCD_STATE_EH_SCHEDULED_NON_FATAL, -}; - /* UFSHCD error handling flags */ enum { UFSHCD_EH_IN_PROGRESS = (1 << 0), @@ -205,7 +199,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state, static struct ufs_dev_fix ufs_fixups[] = { /* UFS cards deviations table */ UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL, - UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM), + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | + UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ), UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE | @@ -242,7 +237,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); static irqreturn_t ufshcd_intr(int irq, void *__hba); static int ufshcd_change_power_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *pwr_mode); -static void ufshcd_schedule_eh_work(struct ufs_hba *hba); static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, @@ -253,11 +247,6 @@ static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable); static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba); static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba); -static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) -{ - return tag >= 0 && tag < hba->nutrs; -} - static inline void ufshcd_enable_irq(struct ufs_hba *hba) { if (!hba->is_irq_enabled) { @@ -371,26 +360,24 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba, static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, enum ufs_trace_str_t str_t) { - u64 lba = -1; + 
u64 lba; u8 opcode = 0, group_id = 0; u32 intr, doorbell; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; struct scsi_cmnd *cmd = lrbp->cmd; + struct request *rq = scsi_cmd_to_rq(cmd); int transfer_len = -1; if (!cmd) return; - if (!trace_ufshcd_command_enabled()) { - /* trace UPIU W/O tracing command */ - ufshcd_add_cmd_upiu_trace(hba, tag, str_t); - return; - } - /* trace UPIU also */ ufshcd_add_cmd_upiu_trace(hba, tag, str_t); + if (!trace_ufshcd_command_enabled()) + return; + opcode = cmd->cmnd[0]; - lba = sectors_to_logical(cmd->device, blk_rq_pos(cmd->request)); + lba = scsi_get_lba(cmd); if (opcode == READ_10 || opcode == WRITE_10) { /* @@ -404,7 +391,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag, /* * The number of Bytes to be unmapped beginning with the lba. */ - transfer_len = blk_rq_bytes(cmd->request); + transfer_len = blk_rq_bytes(rq); } intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS); @@ -758,16 +745,6 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos) } /** - * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field - * @hba: per adapter instance - * @tag: position of the bit to be cleared - */ -static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag) -{ - clear_bit(tag, &hba->outstanding_reqs); -} - -/** * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY * @reg: Register value of host controller status * @@ -2078,7 +2055,7 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) spin_lock_irqsave(hba->host->host_lock, flags); if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) { - struct request *req = lrbp->cmd->request; + struct request *req = scsi_cmd_to_rq(lrbp->cmd); struct ufs_hba_monitor *m = &hba->monitor; ktime_t now, inc, lat; @@ -2112,27 +2089,22 @@ static inline void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + unsigned long flags; lrbp->issue_time_stamp = ktime_get(); lrbp->compl_time_stamp = ktime_set(0, 0); - ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false)); ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND); ufshcd_clk_scaling_start_busy(hba); if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_start_monitor(hba, lrbp); - if (ufshcd_has_utrlcnr(hba)) { - set_bit(task_tag, &hba->outstanding_reqs); - ufshcd_writel(hba, 1 << task_tag, - REG_UTP_TRANSFER_REQ_DOOR_BELL); - } else { - unsigned long flags; - spin_lock_irqsave(hba->host->host_lock, flags); - set_bit(task_tag, &hba->outstanding_reqs); - ufshcd_writel(hba, 1 << task_tag, - REG_UTP_TRANSFER_REQ_DOOR_BELL); - spin_unlock_irqrestore(hba->host->host_lock, flags); - } + spin_lock_irqsave(&hba->outstanding_lock, flags); + if (hba->vops && hba->vops->setup_xfer_req) + hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); + __set_bit(task_tag, &hba->outstanding_reqs); + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + /* Make sure that doorbell is committed immediately */ wmb(); } @@ -2247,15 +2219,15 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) } /** - * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers + * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer * @hba: per adapter instance * @uic_cmd: UIC command - * - * Mutex must be held. 
*/ static inline void ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) { + lockdep_assert_held(&hba->uic_cmd_mutex); + WARN_ON(hba->active_uic_cmd); hba->active_uic_cmd = uic_cmd; @@ -2273,11 +2245,10 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) } /** - * ufshcd_wait_for_uic_cmd - Wait complectioin of UIC command + * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command * @hba: per adapter instance * @uic_cmd: UIC command * - * Must be called with mutex held. * Returns 0 only if success. */ static int @@ -2286,6 +2257,8 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) int ret; unsigned long flags; + lockdep_assert_held(&hba->uic_cmd_mutex); + if (wait_for_completion_timeout(&uic_cmd->done, msecs_to_jiffies(UIC_CMD_TIMEOUT))) { ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT; @@ -2315,14 +2288,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) * @uic_cmd: UIC command * @completion: initialize the completion only if this is set to true * - * Identical to ufshcd_send_uic_cmd() expect mutex. Must be called - * with mutex held and host_lock locked. * Returns 0 only if success. */ static int __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd, bool completion) { + lockdep_assert_held(&hba->uic_cmd_mutex); + lockdep_assert_held(hba->host->host_lock); + if (!ufshcd_ready_for_uic_cmd(hba)) { dev_err(hba->dev, "Controller not ready to accept UIC commands\n"); @@ -2701,20 +2675,12 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) */ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { + struct ufs_hba *hba = shost_priv(host); + int tag = scsi_cmd_to_rq(cmd)->tag; struct ufshcd_lrb *lrbp; - struct ufs_hba *hba; - int tag; int err = 0; - hba = shost_priv(host); - - tag = cmd->request->tag; - if (!ufshcd_valid_tag(hba, tag)) { - dev_err(hba->dev, - "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", - __func__, tag, cmd, cmd->request); - BUG(); - } + WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); if (!down_read_trylock(&hba->clk_scaling_lock)) return SCSI_MLQUEUE_HOST_BUSY; @@ -2748,12 +2714,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) set_host_byte(cmd, DID_ERROR); cmd->scsi_done(cmd); goto out; - default: - dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", - __func__, hba->ufshcd_state); - set_host_byte(cmd, DID_BAD_TARGET); - cmd->scsi_done(cmd); - goto out; } hba->req_abort_count = 0; @@ -2766,15 +2726,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) WARN_ON(ufshcd_is_clkgating_allowed(hba) && (hba->clk_gating.state != CLKS_ON)); - if (unlikely(test_bit(tag, &hba->outstanding_reqs))) { - if (hba->pm_op_in_progress) - set_host_byte(cmd, DID_BAD_TARGET); - else - err = SCSI_MLQUEUE_HOST_BUSY; - ufshcd_release(hba); - goto out; - } - lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); lrbp->cmd = cmd; @@ -2784,10 +2735,17 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? 
true : false; - ufshcd_prepare_lrbp_crypto(cmd->request, lrbp); + ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp); lrbp->req_abort_skip = false; + err = ufshpb_prep(hba, lrbp); + if (err == -EAGAIN) { + lrbp->cmd = NULL; + ufshcd_release(hba); + goto out; + } + ufshcd_comp_scsi_upiu(hba, lrbp); err = ufshcd_map_sg(hba, lrbp); @@ -2796,12 +2754,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_release(hba); goto out; } - /* Make sure descriptors are ready before ringing the doorbell */ - wmb(); ufshcd_send_command(hba, tag); out: up_read(&hba->clk_scaling_lock); + + if (ufs_trigger_eh()) + scsi_schedule_eh(hba->host); + return err; } @@ -2907,8 +2867,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, time_left = wait_for_completion_timeout(hba->dev_cmd.complete, msecs_to_jiffies(max_timeout)); - /* Make sure descriptors are ready before ringing the doorbell */ - wmb(); spin_lock_irqsave(hba->host->host_lock, flags); hba->dev_cmd.complete = NULL; if (likely(time_left)) { @@ -2930,7 +2888,9 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, * we also need to clear the outstanding_request * field in hba */ - ufshcd_outstanding_req_clear(hba, lrbp->task_tag); + spin_lock_irqsave(&hba->outstanding_lock, flags); + __clear_bit(lrbp->task_tag, &hba->outstanding_reqs); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); } return err; @@ -2949,11 +2909,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) { struct request_queue *q = hba->cmd_queue; + DECLARE_COMPLETION_ONSTACK(wait); struct request *req; struct ufshcd_lrb *lrbp; int err; int tag; - struct completion wait; down_read(&hba->clk_scaling_lock); @@ -2968,17 +2928,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, goto out_unlock; } tag = req->tag; - WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); + WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); /* Set the timeout such that the SCSI error handler is not activated. 
*/ req->timeout = msecs_to_jiffies(2 * timeout); blk_mq_start_request(req); - if (unlikely(test_bit(tag, &hba->outstanding_reqs))) { - err = -EBUSY; - goto out; - } - - init_completion(&wait); lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); @@ -2988,8 +2942,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, hba->dev_cmd.complete = &wait; ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); - /* Make sure descriptors are ready before ringing the doorbell */ - wmb(); ufshcd_send_command(hba, tag); err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); @@ -3194,7 +3146,7 @@ out_unlock: * * Returns 0 for success, non-zero in case of failure */ -static int ufshcd_query_attr_retry(struct ufs_hba *hba, +int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) { @@ -3419,9 +3371,11 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, if (is_kmalloc) { /* Make sure we don't copy more data than available */ - if (param_offset + param_size > buff_len) - param_size = buff_len - param_offset; - memcpy(param_read_buf, &desc_buf[param_offset], param_size); + if (param_offset >= buff_len) + ret = -EINVAL; + else + memcpy(param_read_buf, &desc_buf[param_offset], + min_t(u32, param_size, buff_len - param_offset)); } out: if (is_kmalloc) @@ -3965,6 +3919,35 @@ out: } EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); +static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) +{ + lockdep_assert_held(hba->host->host_lock); + + return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || + (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); +} + +static void ufshcd_schedule_eh(struct ufs_hba *hba) +{ + bool schedule_eh = false; + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + /* handle fatal errors only when link is not in error state */ + if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { + if (hba->force_reset || ufshcd_is_link_broken(hba) || + ufshcd_is_saved_err_fatal(hba)) + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; + else + hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; + schedule_eh = true; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (schedule_eh) + scsi_schedule_eh(hba->host); +} + /** * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power * state) and waits for it to take effect. 
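/*
 * [Illustrative sketch, not taken from this patch] The ufshcd_read_desc_param()
 * hunk above rejects a parameter offset that lies beyond the descriptor instead
 * of silently copying from past its end, and clamps the copy length otherwise.
 * The same guard-then-clamp pattern in isolation (names are illustrative):
 */
#include <linux/types.h>
#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/errno.h>

static int copy_desc_param(u8 *dst, const u8 *desc, u32 desc_len,
			   u32 offset, u32 want)
{
	if (offset >= desc_len)
		return -EINVAL;		/* nothing valid at this offset */
	memcpy(dst, desc + offset, min_t(u32, want, desc_len - offset));
	return 0;
}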
@@ -3983,14 +3966,14 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); */ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd) { - struct completion uic_async_done; + DECLARE_COMPLETION_ONSTACK(uic_async_done); unsigned long flags; + bool schedule_eh = false; u8 status; int ret; bool reenable_intr = false; mutex_lock(&hba->uic_cmd_mutex); - init_completion(&uic_async_done); ufshcd_add_delay_before_dme_cmd(hba); spin_lock_irqsave(hba->host->host_lock, flags); @@ -4055,10 +4038,14 @@ out: ufshcd_enable_intr(hba, UIC_COMMAND_COMPL); if (ret) { ufshcd_set_link_broken(hba); - ufshcd_schedule_eh_work(hba); + schedule_eh = true; } + out_unlock: spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (schedule_eh) + ufshcd_schedule_eh(hba); mutex_unlock(&hba->uic_cmd_mutex); return ret; @@ -4987,6 +4974,26 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) return scsi_change_queue_depth(sdev, depth); } +static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_destroy_lu(hba, sdev); +} + +static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* skip well-known LU */ + if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) || + !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba)) + return; + + ufshpb_init_hpb_lu(hba, sdev); +} + /** * ufshcd_slave_configure - adjust SCSI device configurations * @sdev: pointer to SCSI device @@ -4996,6 +5003,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; + ufshcd_hpb_configure(hba, sdev); + blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE) blk_queue_update_dma_alignment(q, PAGE_SIZE - 1); @@ -5020,15 +5029,37 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) static void ufshcd_slave_destroy(struct scsi_device *sdev) { struct ufs_hba *hba; + unsigned long flags; hba = shost_priv(sdev->host); + + ufshcd_hpb_destroy(hba, sdev); + /* Drop the reference as it won't be needed anymore */ if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) { - unsigned long flags; - spin_lock_irqsave(hba->host->host_lock, flags); hba->sdev_ufs_device = NULL; spin_unlock_irqrestore(hba->host->host_lock, flags); + } else if (hba->sdev_ufs_device) { + struct device *supplier = NULL; + + /* Ensure UFS Device WLUN exists and does not disappear */ + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->sdev_ufs_device) { + supplier = &hba->sdev_ufs_device->sdev_gendev; + get_device(supplier); + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + + if (supplier) { + /* + * If a LUN fails to probe (e.g. absent BOOT WLUN), the + * device will not have been registered but can still + * have a device link holding a reference to the device. 
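/*
 * [Illustrative sketch, not taken from this patch] The slave_destroy hunk
 * further down snapshots hba->sdev_ufs_device under host_lock and pins it with
 * get_device() before touching it outside the lock (device_link_remove(),
 * put_device()). The bare pin-under-lock pattern, with illustrative names:
 */
#include <linux/device.h>
#include <linux/spinlock.h>

static struct device *example_pin_supplier(spinlock_t *lock,
					   struct device **shared_ptr)
{
	struct device *dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (*shared_ptr)
		dev = get_device(*shared_ptr);	/* take a ref while the pointer is stable */
	spin_unlock_irqrestore(lock, flags);

	return dev;	/* caller must put_device() when done, may be NULL */
}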
+ */ + device_link_remove(&sdev->sdev_gendev, supplier); + put_device(supplier); + } } } @@ -5124,6 +5155,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) /* Flushed in suspend */ schedule_work(&hba->eeh_work); + + if (scsi_status == SAM_STAT_GOOD) + ufshpb_rsp_upiu(hba, lrbp); break; case UPIU_TRANSACTION_REJECT_UPIU: /* TODO: handle Reject UPIU Response */ @@ -5232,10 +5266,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) /** * __ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance - * @completed_reqs: requests to complete + * @completed_reqs: bitmask that indicates which requests to complete + * @retry_requests: whether to ask the SCSI core to retry completed requests */ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, - unsigned long completed_reqs) + unsigned long completed_reqs, + bool retry_requests) { struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; @@ -5244,8 +5280,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, bool update_scaling = false; for_each_set_bit(index, &completed_reqs, hba->nutrs) { - if (!test_and_clear_bit(index, &hba->outstanding_reqs)) - continue; lrbp = &hba->lrb[index]; lrbp->compl_time_stamp = ktime_get(); cmd = lrbp->cmd; @@ -5253,7 +5287,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_update_monitor(hba, lrbp); ufshcd_add_command_trace(hba, index, UFS_CMD_COMP); - result = ufshcd_transfer_rsp_status(hba, lrbp); + result = retry_requests ? DID_BUS_BUSY << 16 : + ufshcd_transfer_rsp_status(hba, lrbp); scsi_dma_unmap(cmd); cmd->result = result; /* Mark completed command as NULL in LRB */ @@ -5277,17 +5312,19 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, } /** - * ufshcd_trc_handler - handle transfer requests completion + * ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance - * @use_utrlcnr: get completed requests from UTRLCNR + * @retry_requests: whether or not to ask to retry requests * * Returns * IRQ_HANDLED - If interrupt is valid * IRQ_NONE - If invalid interrupt */ -static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr) +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba, + bool retry_requests) { - unsigned long completed_reqs = 0; + unsigned long completed_reqs, flags; + u32 tr_doorbell; /* Resetting interrupt aggregation counters first and reading the * DOOR_BELL afterward allows us to handle all the completed requests. 
@@ -5300,27 +5337,21 @@ static irqreturn_t ufshcd_trc_handler(struct ufs_hba *hba, bool use_utrlcnr) !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR)) ufshcd_reset_intr_aggr(hba); - if (use_utrlcnr) { - u32 utrlcnr; + if (ufs_fail_completion()) + return IRQ_HANDLED; - utrlcnr = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_LIST_COMPL); - if (utrlcnr) { - ufshcd_writel(hba, utrlcnr, - REG_UTP_TRANSFER_REQ_LIST_COMPL); - completed_reqs = utrlcnr; - } - } else { - unsigned long flags; - u32 tr_doorbell; - - spin_lock_irqsave(hba->host->host_lock, flags); - tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - completed_reqs = tr_doorbell ^ hba->outstanding_reqs; - spin_unlock_irqrestore(hba->host->host_lock, flags); - } + spin_lock_irqsave(&hba->outstanding_lock, flags); + tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + completed_reqs = ~tr_doorbell & hba->outstanding_reqs; + WARN_ONCE(completed_reqs & ~hba->outstanding_reqs, + "completed: %#lx; outstanding: %#lx\n", completed_reqs, + hba->outstanding_reqs); + hba->outstanding_reqs &= ~completed_reqs; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); if (completed_reqs) { - __ufshcd_transfer_req_compl(hba, completed_reqs); + __ufshcd_transfer_req_compl(hba, completed_reqs, + retry_requests); return IRQ_HANDLED; } else { return IRQ_NONE; @@ -5799,7 +5830,13 @@ out: /* Complete requests that have door-bell cleared */ static void ufshcd_complete_requests(struct ufs_hba *hba) { - ufshcd_trc_handler(hba, false); + ufshcd_transfer_req_compl(hba, /*retry_requests=*/false); + ufshcd_tmc_handler(hba); +} + +static void ufshcd_retry_aborted_requests(struct ufs_hba *hba) +{ + ufshcd_transfer_req_compl(hba, /*retry_requests=*/true); ufshcd_tmc_handler(hba); } @@ -5874,27 +5911,6 @@ out: return err_handling; } -/* host lock must be held before calling this func */ -static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba) -{ - return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) || - (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)); -} - -/* host lock must be held before calling this func */ -static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba) -{ - /* handle fatal errors only when link is not in error state */ - if (hba->ufshcd_state != UFSHCD_STATE_ERROR) { - if (hba->force_reset || ufshcd_is_link_broken(hba) || - ufshcd_is_saved_err_fatal(hba)) - hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL; - else - hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL; - queue_work(hba->eh_wq, &hba->eh_work); - } -} - static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) { down_write(&hba->clk_scaling_lock); @@ -6028,11 +6044,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba) /** * ufshcd_err_handler - handle UFS errors that require s/w attention - * @work: pointer to work structure + * @host: SCSI host pointer */ -static void ufshcd_err_handler(struct work_struct *work) +static void ufshcd_err_handler(struct Scsi_Host *host) { - struct ufs_hba *hba; + struct ufs_hba *hba = shost_priv(host); unsigned long flags; bool err_xfer = false; bool err_tm = false; @@ -6040,10 +6056,9 @@ static void ufshcd_err_handler(struct work_struct *work) int tag; bool needs_reset = false, needs_restore = false; - hba = container_of(work, struct ufs_hba, eh_work); - down(&hba->host_sem); spin_lock_irqsave(hba->host->host_lock, flags); + hba->host->host_eh_scheduled = 0; if (ufshcd_err_handling_should_stop(hba)) { if (hba->ufshcd_state != UFSHCD_STATE_ERROR) 
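/*
 * [Worked example, not taken from this patch] For the completion math above:
 * a tag has completed when its doorbell bit has cleared while the tag is still
 * marked outstanding.
 *
 *   outstanding_reqs = 0b1011   (tags 0, 1 and 3 were issued)
 *   tr_doorbell      = 0b0010   (only tag 1 is still owned by the controller)
 *   completed_reqs   = ~tr_doorbell & outstanding_reqs = 0b1001  (tags 0 and 3)
 *
 * Both the doorbell read and the outstanding_reqs update happen under the new
 * outstanding_lock so the same tag cannot be completed twice.
 */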
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; @@ -6141,8 +6156,7 @@ static void ufshcd_err_handler(struct work_struct *work) } lock_skip_pending_xfer_clear: - /* Complete the requests that are cleared by s/w */ - ufshcd_complete_requests(hba); + ufshcd_retry_aborted_requests(hba); spin_lock_irqsave(hba->host->host_lock, flags); hba->silence_err_logs = false; @@ -6357,7 +6371,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) "host_regs: "); ufshcd_print_pwr_info(hba); } - ufshcd_schedule_eh_work(hba); retval |= IRQ_HANDLED; } /* @@ -6369,6 +6382,10 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status) hba->errors = 0; hba->uic_error = 0; spin_unlock(hba->host->host_lock); + + if (queue_eh_work) + ufshcd_schedule_eh(hba); + return retval; } @@ -6440,7 +6457,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) retval |= ufshcd_tmc_handler(hba); if (intr_status & UTP_TRANSFER_REQ_COMPL) - retval |= ufshcd_trc_handler(hba, ufshcd_has_utrlcnr(hba)); + retval |= ufshcd_transfer_req_compl(hba, /*retry_requests=*/false); return retval; } @@ -6548,9 +6565,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, /* send command to the controller */ __set_bit(task_tag, &hba->outstanding_tasks); - /* Make sure descriptors are ready before ringing the task doorbell */ - wmb(); - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); @@ -6663,11 +6677,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, enum query_opcode desc_op) { struct request_queue *q = hba->cmd_queue; + DECLARE_COMPLETION_ONSTACK(wait); struct request *req; struct ufshcd_lrb *lrbp; int err = 0; int tag; - struct completion wait; u8 upiu_flags; down_read(&hba->clk_scaling_lock); @@ -6678,14 +6692,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, goto out_unlock; } tag = req->tag; - WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); + WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); if (unlikely(test_bit(tag, &hba->outstanding_reqs))) { err = -EBUSY; goto out; } - init_completion(&wait); lrbp = &hba->lrb[tag]; WARN_ON(lrbp->cmd); lrbp->cmd = NULL; @@ -6723,8 +6736,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, hba->dev_cmd.complete = &wait; ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); - /* Make sure descriptors are ready before ringing the doorbell */ - wmb(); ufshcd_send_command(hba, tag); /* @@ -6865,7 +6876,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) err = ufshcd_clear_cmd(hba, pos); if (err) break; - __ufshcd_transfer_req_compl(hba, pos); + __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true); } } @@ -6981,33 +6992,24 @@ out: */ static int ufshcd_abort(struct scsi_cmnd *cmd) { - struct Scsi_Host *host; - struct ufs_hba *hba; + struct Scsi_Host *host = cmd->device->host; + struct ufs_hba *hba = shost_priv(host); + int tag = scsi_cmd_to_rq(cmd)->tag; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; unsigned long flags; - unsigned int tag; - int err = 0; - struct ufshcd_lrb *lrbp; + int err = FAILED; u32 reg; - host = cmd->device->host; - hba = shost_priv(host); - tag = cmd->request->tag; - lrbp = &hba->lrb[tag]; - if (!ufshcd_valid_tag(hba, tag)) { - dev_err(hba->dev, - "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p", - __func__, tag, cmd, cmd->request); - BUG(); - } + WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); ufshcd_hold(hba, false); reg = ufshcd_readl(hba, 
REG_UTP_TRANSFER_REQ_DOOR_BELL); - /* If command is already aborted/completed, return SUCCESS */ + /* If command is already aborted/completed, return FAILED. */ if (!(test_bit(tag, &hba->outstanding_reqs))) { dev_err(hba->dev, "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", __func__, tag, hba->outstanding_reqs, reg); - goto out; + goto release; } /* Print Transfer Request of aborted task */ @@ -7036,7 +7038,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) dev_err(hba->dev, "%s: cmd was completed, but without a notifying intr, tag = %d", __func__, tag); - goto cleanup; + __ufshcd_transfer_req_compl(hba, 1UL << tag, /*retry_requests=*/false); + goto release; } /* @@ -7045,40 +7048,39 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) * will be to send LU reset which, again, is a spec violation. * To avoid these unnecessary/illegal steps, first we clean up * the lrb taken by this cmd and re-set it in outstanding_reqs, - * then queue the eh_work and bail. + * then queue the error handler and bail. */ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) { ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun); - __ufshcd_transfer_req_compl(hba, (1UL << tag)); - set_bit(tag, &hba->outstanding_reqs); + spin_lock_irqsave(host->host_lock, flags); hba->force_reset = true; - ufshcd_schedule_eh_work(hba); spin_unlock_irqrestore(host->host_lock, flags); - goto out; + + ufshcd_schedule_eh(hba); + + goto release; } /* Skip task abort in case previous aborts failed and report failure */ - if (lrbp->req_abort_skip) - err = -EIO; - else - err = ufshcd_try_to_abort_task(hba, tag); + if (lrbp->req_abort_skip) { + dev_err(hba->dev, "%s: skipping abort\n", __func__); + ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); + goto release; + } - if (!err) { -cleanup: - __ufshcd_transfer_req_compl(hba, (1UL << tag)); -out: - err = SUCCESS; - } else { + err = ufshcd_try_to_abort_task(hba, tag); + if (err) { dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs); err = FAILED; + goto release; } - /* - * This ufshcd_release() corresponds to the original scsi cmd that got - * aborted here (as we won't get any IRQ for it). - */ + err = SUCCESS; + +release: + /* Matches the ufshcd_hold() call at the start of this function. 
*/ ufshcd_release(hba); return err; } @@ -7101,9 +7103,10 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) * Stop the host controller and complete the requests * cleared by h/w */ + ufshpb_reset_host(hba); ufshcd_hba_stop(hba); hba->silence_err_logs = true; - ufshcd_complete_requests(hba); + ufshcd_retry_aborted_requests(hba); hba->silence_err_logs = false; /* scale up clocks to max frequency before full reinitialization */ @@ -7188,11 +7191,10 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) spin_lock_irqsave(hba->host->host_lock, flags); hba->force_reset = true; - ufshcd_schedule_eh_work(hba); dev_err(hba->dev, "%s: reset in progress - 1\n", __func__); spin_unlock_irqrestore(hba->host->host_lock, flags); - flush_work(&hba->eh_work); + ufshcd_err_handler(hba->host); spin_lock_irqsave(hba->host->host_lock, flags); if (hba->ufshcd_state == UFSHCD_STATE_ERROR) @@ -7499,6 +7501,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba) { int err; u8 model_index; + u8 b_ufs_feature_sup; u8 *desc_buf; struct ufs_dev_info *dev_info = &hba->dev_info; @@ -7526,9 +7529,26 @@ static int ufs_get_device_desc(struct ufs_hba *hba) /* getting Specification Version in big endian format */ dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; + b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT]; model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION && + (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) { + bool hpb_en = false; + + ufshpb_get_dev_info(hba, desc_buf); + + if (!ufshpb_is_legacy(hba)) + err = ufshcd_query_flag_retry(hba, + UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_EN, 0, + &hpb_en); + + if (ufshpb_is_legacy(hba) || (!err && hpb_en)) + dev_info->hpb_enabled = true; + } + err = ufshcd_read_string_desc(hba, model_index, &dev_info->model, SD_ASCII_STD); if (err < 0) { @@ -7760,6 +7780,10 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba) else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) hba->dev_info.max_lu_supported = 8; + if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS) + ufshpb_get_geo_info(hba, desc_buf); + out: kfree(desc_buf); return err; @@ -7902,6 +7926,7 @@ static int ufshcd_add_lus(struct ufs_hba *hba) } ufs_bsg_probe(hba); + ufshpb_init(hba); scsi_scan_host(hba->host); pm_runtime_put_sync(hba->dev); @@ -7909,8 +7934,39 @@ out: return ret; } +static void ufshcd_request_sense_done(struct request *rq, blk_status_t error) +{ + if (error != BLK_STS_OK) + pr_err("%s: REQUEST SENSE failed (%d)", __func__, error); + blk_put_request(rq); +} + static int -ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp); +ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev) +{ + /* + * From SPC-6: the REQUEST SENSE command with any allocation length + * clears the sense data. 
+ */ + static const u8 cmd[6] = {REQUEST_SENSE, 0, 0, 0, 0, 0}; + struct scsi_request *rq; + struct request *req; + + req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN, /*flags=*/0); + if (IS_ERR(req)) + return PTR_ERR(req); + + rq = scsi_req(req); + rq->cmd_len = ARRAY_SIZE(cmd); + memcpy(rq->cmd, cmd, rq->cmd_len); + rq->retries = 3; + req->timeout = 1 * HZ; + req->rq_flags |= RQF_PM | RQF_QUIET; + + blk_execute_rq_nowait(/*bd_disk=*/NULL, req, /*at_head=*/true, + ufshcd_request_sense_done); + return 0; +} static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun) { @@ -7938,7 +7994,7 @@ static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun) if (ret) goto out_err; - ret = ufshcd_send_request_sense(hba, sdp); + ret = ufshcd_request_sense_async(hba, sdp); scsi_device_put(sdp); out_err: if (ret) @@ -7967,13 +8023,13 @@ out: } /** - * ufshcd_probe_hba - probe hba to detect device and initialize + * ufshcd_probe_hba - probe hba to detect device and initialize it * @hba: per-adapter instance - * @async: asynchronous execution or not + * @init_dev_params: whether or not to call ufshcd_device_params_init(). * * Execute link-startup and verify device initialization */ -static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) +static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) { int ret; unsigned long flags; @@ -8005,7 +8061,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) * Initialize UFS device parameters used by driver, these * parameters are associated with UFS descriptors. */ - if (async) { + if (init_dev_params) { ret = ufshcd_device_params_init(hba); if (ret) goto out; @@ -8050,6 +8106,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + ufshpb_reset(hba); out: spin_lock_irqsave(hba->host->host_lock, flags); if (ret) @@ -8097,6 +8154,10 @@ out: static const struct attribute_group *ufshcd_driver_groups[] = { &ufs_sysfs_unit_descriptor_group, &ufs_sysfs_lun_attributes_group, +#ifdef CONFIG_SCSI_UFS_HPB + &ufs_sysfs_hpb_stat_group, + &ufs_sysfs_hpb_param_group, +#endif NULL, }; @@ -8521,8 +8582,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) if (hba->is_powered) { ufshcd_exit_clk_scaling(hba); ufshcd_exit_clk_gating(hba); - if (hba->eh_wq) - destroy_workqueue(hba->eh_wq); ufs_debugfs_hba_exit(hba); ufshcd_variant_hba_exit(hba); ufshcd_setup_vreg(hba, false); @@ -8533,35 +8592,6 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) } } -static int -ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp) -{ - unsigned char cmd[6] = {REQUEST_SENSE, - 0, - 0, - 0, - UFS_SENSE_SIZE, - 0}; - char *buffer; - int ret; - - buffer = kzalloc(UFS_SENSE_SIZE, GFP_KERNEL); - if (!buffer) { - ret = -ENOMEM; - goto out; - } - - ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer, - UFS_SENSE_SIZE, NULL, NULL, - msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL); - if (ret) - pr_err("%s: failed with err %d\n", __func__, ret); - - kfree(buffer); -out: - return ret; -} - /** * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device * power mode @@ -8739,6 +8769,7 @@ static void ufshcd_vreg_set_lpm(struct ufs_hba *hba) usleep_range(5000, 5100); } +#ifdef CONFIG_PM static int ufshcd_vreg_set_hpm(struct ufs_hba *hba) { int ret = 0; @@ -8766,6 +8797,7 @@ vcc_disable: out: return ret; } +#endif /* CONFIG_PM */ static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba) { @@ -8798,6 +8830,8 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, 
enum ufs_pm_op pm_op) req_link_state = UIC_LINK_OFF_STATE; } + ufshpb_suspend(hba); + /* * If we can't transition into any of the low power modes * just gate the clocks. @@ -8921,6 +8955,7 @@ out: ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret); hba->clk_gating.is_suspended = false; ufshcd_release(hba); + ufshpb_resume(hba); } hba->pm_op_in_progress = false; return ret; @@ -8999,6 +9034,8 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + + ufshpb_resume(hba); goto out; set_old_link_state: @@ -9168,6 +9205,7 @@ static int ufshcd_suspend(struct ufs_hba *hba) return ret; } +#ifdef CONFIG_PM /** * ufshcd_resume - helper function for resume operations * @hba: per adapter instance @@ -9205,17 +9243,21 @@ out: ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret); return ret; } +#endif /* CONFIG_PM */ +#ifdef CONFIG_PM_SLEEP /** - * ufshcd_system_suspend - system suspend routine - * @hba: per adapter instance + * ufshcd_system_suspend - system suspend callback + * @dev: Device associated with the UFS controller. * - * Check the description of ufshcd_suspend() function for more details. + * Executed before putting the system into a sleep state in which the contents + * of main memory are preserved. * * Returns 0 for success and non-zero for failure */ -int ufshcd_system_suspend(struct ufs_hba *hba) +int ufshcd_system_suspend(struct device *dev) { + struct ufs_hba *hba = dev_get_drvdata(dev); int ret = 0; ktime_t start = ktime_get(); @@ -9232,16 +9274,19 @@ out: EXPORT_SYMBOL(ufshcd_system_suspend); /** - * ufshcd_system_resume - system resume routine - * @hba: per adapter instance + * ufshcd_system_resume - system resume callback + * @dev: Device associated with the UFS controller. + * + * Executed after waking the system up from a sleep state in which the contents + * of main memory were preserved. * * Returns 0 for success and non-zero for failure */ - -int ufshcd_system_resume(struct ufs_hba *hba) +int ufshcd_system_resume(struct device *dev) { - int ret = 0; + struct ufs_hba *hba = dev_get_drvdata(dev); ktime_t start = ktime_get(); + int ret = 0; if (pm_runtime_suspended(hba->dev)) goto out; @@ -9256,17 +9301,20 @@ out: return ret; } EXPORT_SYMBOL(ufshcd_system_resume); +#endif /* CONFIG_PM_SLEEP */ +#ifdef CONFIG_PM /** - * ufshcd_runtime_suspend - runtime suspend routine - * @hba: per adapter instance + * ufshcd_runtime_suspend - runtime suspend callback + * @dev: Device associated with the UFS controller. * * Check the description of ufshcd_suspend() function for more details. * * Returns 0 for success and non-zero for failure */ -int ufshcd_runtime_suspend(struct ufs_hba *hba) +int ufshcd_runtime_suspend(struct device *dev) { + struct ufs_hba *hba = dev_get_drvdata(dev); int ret; ktime_t start = ktime_get(); @@ -9281,7 +9329,7 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend); /** * ufshcd_runtime_resume - runtime resume routine - * @hba: per adapter instance + * @dev: Device associated with the UFS controller. * * This function basically brings controller * to active state. Following operations are done in this function: @@ -9289,8 +9337,9 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend); * 1. Turn on all the controller related clocks * 2. 
Turn ON VCC rail */ -int ufshcd_runtime_resume(struct ufs_hba *hba) +int ufshcd_runtime_resume(struct device *dev) { + struct ufs_hba *hba = dev_get_drvdata(dev); int ret; ktime_t start = ktime_get(); @@ -9302,12 +9351,7 @@ int ufshcd_runtime_resume(struct ufs_hba *hba) return ret; } EXPORT_SYMBOL(ufshcd_runtime_resume); - -int ufshcd_runtime_idle(struct ufs_hba *hba) -{ - return 0; -} -EXPORT_SYMBOL(ufshcd_runtime_idle); +#endif /* CONFIG_PM */ /** * ufshcd_shutdown - shutdown routine @@ -9343,6 +9387,7 @@ void ufshcd_remove(struct ufs_hba *hba) if (hba->sdev_ufs_device) ufshcd_rpm_get_sync(hba); ufs_bsg_remove(hba); + ufshpb_remove(hba); ufs_sysfs_remove_nodes(hba->dev); blk_cleanup_queue(hba->tmf_queue); blk_mq_free_tag_set(&hba->tmf_tag_set); @@ -9381,6 +9426,10 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba) return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); } +static struct scsi_transport_template ufshcd_transport_template = { + .eh_strategy_handler = ufshcd_err_handler, +}; + /** * ufshcd_alloc_host - allocate Host Bus Adapter (HBA) * @dev: pointer to device handle @@ -9407,13 +9456,15 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) err = -ENOMEM; goto out_error; } + host->transportt = &ufshcd_transport_template; hba = shost_priv(host); hba->host = host; hba->dev = dev; - *hba_handle = hba; hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; - INIT_LIST_HEAD(&hba->clk_list_head); + spin_lock_init(&hba->outstanding_lock); + + *hba_handle = hba; out_error: return err; @@ -9444,7 +9495,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) int err; struct Scsi_Host *host = hba->host; struct device *dev = hba->dev; - char eh_wq_name[sizeof("ufs_eh_wq_00")]; if (!mmio_base) { dev_err(hba->dev, @@ -9498,17 +9548,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->max_pwr_info.is_valid = false; - /* Initialize work queues */ - snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d", - hba->host->host_no); - hba->eh_wq = create_singlethread_workqueue(eh_wq_name); - if (!hba->eh_wq) { - dev_err(hba->dev, "%s: failed to create eh workqueue\n", - __func__); - err = -ENOMEM; - goto out_disable; - } - INIT_WORK(&hba->eh_work, ufshcd_err_handler); INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); sema_init(&hba->host_sem, 1); @@ -9627,6 +9666,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) async_schedule(ufshcd_async_scan, hba); ufs_sysfs_add_nodes(hba->dev); + device_enable_async_suspend(dev); return 0; free_tmf_queue: diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 194755c9ddfe..52ea6f350b18 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -476,6 +476,27 @@ struct ufs_stats { struct ufs_event_hist event[UFS_EVT_CNT]; }; +/** + * enum ufshcd_state - UFS host controller state + * @UFSHCD_STATE_RESET: Link is not operational. Postpone SCSI command + * processing. + * @UFSHCD_STATE_OPERATIONAL: The host controller is operational and can process + * SCSI commands. + * @UFSHCD_STATE_EH_SCHEDULED_NON_FATAL: The error handler has been scheduled. + * SCSI commands may be submitted to the controller. + * @UFSHCD_STATE_EH_SCHEDULED_FATAL: The error handler has been scheduled. Fail + * newly submitted SCSI commands with error code DID_BAD_TARGET. + * @UFSHCD_STATE_ERROR: An unrecoverable error occurred, e.g. link recovery + * failed. Fail all SCSI commands with error code DID_ERROR. 
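/*
 * [Illustrative sketch, not taken from this patch] With the PM entry points
 * now taking a struct device *, a bus glue driver can hand them straight to
 * dev_pm_ops; the SET_*_PM_OPS macros expand to nothing when CONFIG_PM_SLEEP /
 * CONFIG_PM are off, matching the #ifdef guards added above. The ops name
 * below is made up; the real PCI/platform glue is not part of this excerpt:
 */
#include <linux/pm.h>

static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
};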
+ */ +enum ufshcd_state { + UFSHCD_STATE_RESET, + UFSHCD_STATE_OPERATIONAL, + UFSHCD_STATE_EH_SCHEDULED_NON_FATAL, + UFSHCD_STATE_EH_SCHEDULED_FATAL, + UFSHCD_STATE_ERROR, +}; + enum ufshcd_quirks { /* Interrupt aggregation support is broken */ UFSHCD_QUIRK_BROKEN_INTR_AGGR = 1 << 0, @@ -641,6 +662,31 @@ struct ufs_hba_variant_params { u32 wb_flush_threshold; }; +#ifdef CONFIG_SCSI_UFS_HPB +/** + * struct ufshpb_dev_info - UFSHPB device related info + * @num_lu: the number of user logical unit to check whether all lu finished + * initialization + * @rgn_size: device reported HPB region size + * @srgn_size: device reported HPB sub-region size + * @slave_conf_cnt: counter to check all lu finished initialization + * @hpb_disabled: flag to check if HPB is disabled + * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value + * @is_legacy: flag to check HPB 1.0 + * @control_mode: either host or device + */ +struct ufshpb_dev_info { + int num_lu; + int rgn_size; + int srgn_size; + atomic_t slave_conf_cnt; + bool hpb_disabled; + u8 max_hpb_single_cmd; + bool is_legacy; + u8 control_mode; +}; +#endif + struct ufs_hba_monitor { unsigned long chunk_size; @@ -674,6 +720,7 @@ struct ufs_hba_monitor { * @lrb: local reference block * @cmd_queue: Used to allocate command tags from hba->host->tag_set. * @outstanding_tasks: Bits representing outstanding task requests + * @outstanding_lock: Protects @outstanding_reqs. * @outstanding_reqs: Bits representing outstanding transfer requests * @capabilities: UFS Controller Capabilities * @nutrs: Transfer Request Queue depth supported by controller @@ -683,19 +730,17 @@ struct ufs_hba_monitor { * @priv: pointer to variant specific private data * @irq: Irq number of the controller * @active_uic_cmd: handle of active UIC command - * @uic_cmd_mutex: mutex for uic command + * @uic_cmd_mutex: mutex for UIC command * @tmf_tag_set: TMF tag set. * @tmf_queue: Used to allocate TMF tags. 
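/*
 * [Illustrative sketch, not taken from this patch] @outstanding_lock now
 * guards every update of @outstanding_reqs. The submission-side counterpart of
 * the clear-on-completion shown earlier presumably looks like the sketch below
 * (it is not visible in this excerpt); the non-atomic bitop is safe because the
 * spinlock serialises all writers:
 */
#include <linux/bitops.h>
#include <linux/spinlock.h>

static void example_mark_outstanding(spinlock_t *lock,
				     unsigned long *outstanding, int tag)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	__set_bit(tag, outstanding);	/* tag now owned by the controller */
	spin_unlock_irqrestore(lock, flags);
}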
* @pwr_done: completion for power mode change - * @ufshcd_state: UFSHCD states + * @ufshcd_state: UFSHCD state * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits * @ee_ctrl_mask: Exception event control mask * @is_powered: flag to check if HBA is powered * @shutting_down: flag to check if shutdown has been invoked * @host_sem: semaphore used to serialize concurrent contexts - * @eh_wq: Workqueue that eh_work works on - * @eh_work: Worker to handle UFS errors that require s/w attention * @eeh_work: Worker to handle exception events * @errors: HBA errors * @uic_error: UFS interconnect layer error status @@ -760,6 +805,7 @@ struct ufs_hba { struct ufshcd_lrb *lrb; unsigned long outstanding_tasks; + spinlock_t outstanding_lock; unsigned long outstanding_reqs; u32 capabilities; @@ -785,7 +831,7 @@ struct ufs_hba { struct mutex uic_cmd_mutex; struct completion *uic_async_done; - u32 ufshcd_state; + enum ufshcd_state ufshcd_state; u32 eh_flags; u32 intr_mask; u16 ee_ctrl_mask; /* Exception event mask */ @@ -797,8 +843,6 @@ struct ufs_hba { struct semaphore host_sem; /* Work Queues */ - struct workqueue_struct *eh_wq; - struct work_struct eh_work; struct work_struct eeh_work; /* HBA Errors */ @@ -851,6 +895,10 @@ struct ufs_hba { struct request_queue *bsg_queue; struct delayed_work rpm_dev_flush_recheck_work; +#ifdef CONFIG_SCSI_UFS_HPB + struct ufshpb_dev_info ufshpb_dev; +#endif + struct ufs_hba_monitor monitor; #ifdef CONFIG_SCSI_UFS_CRYPTO @@ -893,16 +941,8 @@ static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba) static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba) { -/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/ -#ifndef CONFIG_SCSI_UFS_DWC - if ((hba->caps & UFSHCD_CAP_INTR_AGGR) && - !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR)) - return true; - else - return false; -#else -return true; -#endif + return (hba->caps & UFSHCD_CAP_INTR_AGGR) && + !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR); } static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba) @@ -1009,11 +1049,14 @@ static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba) return 0; } -extern int ufshcd_runtime_suspend(struct ufs_hba *hba); -extern int ufshcd_runtime_resume(struct ufs_hba *hba); -extern int ufshcd_runtime_idle(struct ufs_hba *hba); -extern int ufshcd_system_suspend(struct ufs_hba *hba); -extern int ufshcd_system_resume(struct ufs_hba *hba); +#ifdef CONFIG_PM +extern int ufshcd_runtime_suspend(struct device *dev); +extern int ufshcd_runtime_resume(struct device *dev); +#endif +#ifdef CONFIG_PM_SLEEP +extern int ufshcd_system_suspend(struct device *dev); +extern int ufshcd_system_resume(struct device *dev); +#endif extern int ufshcd_shutdown(struct ufs_hba *hba); extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba, int agreed_gear, @@ -1096,6 +1139,9 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, u8 param_offset, u8 *param_read_buf, u8 param_size); +int ufshcd_query_attr_retry(struct ufs_hba *hba, enum query_opcode opcode, + enum attr_idn idn, u8 index, u8 selector, + u32 *attr_val); int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, @@ -1160,11 +1206,6 @@ static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba) return ufshcd_readl(hba, REG_UFS_VERSION); } -static inline bool ufshcd_has_utrlcnr(struct ufs_hba *hba) -{ - return (hba->ufs_version >= 
ufshci_version(3, 0)); -} - static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba, bool up, enum ufs_notify_change_status status) { @@ -1226,18 +1267,6 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, return -ENOTSUPP; } -static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag, - bool is_scsi_cmd) -{ - if (hba->vops && hba->vops->setup_xfer_req) { - unsigned long flags; - - spin_lock_irqsave(hba->host->host_lock, flags); - hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); - spin_unlock_irqrestore(hba->host->host_lock, flags); - } -} - static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, int tag, u8 tm_function) { diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 5affb1fce5ad..de95be5d11d4 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -39,7 +39,6 @@ enum { REG_UTP_TRANSFER_REQ_DOOR_BELL = 0x58, REG_UTP_TRANSFER_REQ_LIST_CLEAR = 0x5C, REG_UTP_TRANSFER_REQ_LIST_RUN_STOP = 0x60, - REG_UTP_TRANSFER_REQ_LIST_COMPL = 0x64, REG_UTP_TASK_REQ_LIST_BASE_L = 0x70, REG_UTP_TASK_REQ_LIST_BASE_H = 0x74, REG_UTP_TASK_REQ_DOOR_BELL = 0x78, diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c new file mode 100644 index 000000000000..9acce92a356b --- /dev/null +++ b/drivers/scsi/ufs/ufshpb.c @@ -0,0 +1,2933 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Universal Flash Storage Host Performance Booster + * + * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd. + * + * Authors: + * Yongmyung Lee <ymhungry.lee@samsung.com> + * Jinyoung Choi <j-young.choi@samsung.com> + */ + +#include <asm/unaligned.h> +#include <linux/async.h> + +#include "ufshcd.h" +#include "ufshpb.h" +#include "../sd.h" + +#define ACTIVATION_THRESHOLD 8 /* 8 IOs */ +#define READ_TO_MS 1000 +#define READ_TO_EXPIRIES 100 +#define POLLING_INTERVAL_MS 200 +#define THROTTLE_MAP_REQ_DEFAULT 1 + +/* memory management */ +static struct kmem_cache *ufshpb_mctx_cache; +static mempool_t *ufshpb_mctx_pool; +static mempool_t *ufshpb_page_pool; +/* A cache size of 2MB can cache ppn in the 1GB range. */ +static unsigned int ufshpb_host_map_kbytes = 2048; +static int tot_active_srgn_pages; + +static struct workqueue_struct *ufshpb_wq; + +static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx); + +bool ufshpb_is_allowed(struct ufs_hba *hba) +{ + return !(hba->ufshpb_dev.hpb_disabled); +} + +/* HPB version 1.0 is called as legacy version. 
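/*
 * [Worked example, not taken from this patch] Numbers behind the
 * "2MB can cache ppn in the 1GB range" comment above, assuming the usual
 * 8-byte HPB L2P entry and 4 KiB logical blocks:
 *
 *   2 MiB / 8 B        = 262,144 cached entries
 *   262,144 * 4 KiB    = 1 GiB of logical address space covered
 *
 * ufshpb_host_map_kbytes = 2048 is therefore the per-host default budget for
 * the map-page mempool sized further down.
 */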
*/ +bool ufshpb_is_legacy(struct ufs_hba *hba) +{ + return hba->ufshpb_dev.is_legacy; +} + +static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev) +{ + return sdev->hostdata; +} + +static int ufshpb_get_state(struct ufshpb_lu *hpb) +{ + return atomic_read(&hpb->hpb_state); +} + +static void ufshpb_set_state(struct ufshpb_lu *hpb, int state) +{ + atomic_set(&hpb->hpb_state, state); +} + +static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + return rgn->rgn_state != HPB_RGN_INACTIVE && + srgn->srgn_state == HPB_SRGN_VALID; +} + +static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd) +{ + return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ; +} + +static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd) +{ + return op_is_write(req_op(scsi_cmd_to_rq(cmd))) || + op_is_discard(req_op(scsi_cmd_to_rq(cmd))); +} + +static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len) +{ + return transfer_len <= hpb->pre_req_max_tr_len; +} + +/* + * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 1MB (len=256) as + * default. It is possible to change range of transfer_len through sysfs. + */ +static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len) +{ + return len > hpb->pre_req_min_tr_len && + len <= hpb->pre_req_max_tr_len; +} + +static bool ufshpb_is_general_lun(int lun) +{ + return lun < UFS_UPIU_MAX_UNIT_NUM_ID; +} + +static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx) +{ + if (hpb->lu_pinned_end != PINNED_NOT_SET && + rgn_idx >= hpb->lu_pinned_start && + rgn_idx <= hpb->lu_pinned_end) + return true; + + return false; +} + +static void ufshpb_kick_map_work(struct ufshpb_lu *hpb) +{ + bool ret = false; + unsigned long flags; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) + return; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn)) + ret = true; + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + if (ret) + queue_work(ufshpb_wq, &hpb->map_work); +} + +static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp, + struct utp_hpb_rsp *rsp_field) +{ + /* Check HPB_UPDATE_ALERT */ + if (!(lrbp->ucd_rsp_ptr->header.dword_2 & + UPIU_HEADER_DWORD(0, 2, 0, 0))) + return false; + + if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN || + rsp_field->desc_type != DEV_DES_TYPE || + rsp_field->additional_len != DEV_ADDITIONAL_LEN || + rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM || + rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM || + rsp_field->hpb_op == HPB_RSP_NONE || + (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE && + !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt)) + return false; + + if (!ufshpb_is_general_lun(rsp_field->lun)) { + dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n", + lrbp->lun); + return false; + } + + return true; +} + +static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx, + int srgn_offset, int cnt, bool set_dirty) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn, *prev_srgn = NULL; + int set_bit_len; + int bitmap_len; + unsigned long flags; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (likely(!srgn->is_last)) + bitmap_len = hpb->entries_per_srgn; + else + bitmap_len = hpb->last_srgn_entries; + + if ((srgn_offset + cnt) > bitmap_len) + set_bit_len = bitmap_len - srgn_offset; + else + set_bit_len = cnt; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if 
(rgn->rgn_state != HPB_RGN_INACTIVE) { + if (set_dirty) { + if (srgn->srgn_state == HPB_SRGN_VALID) + bitmap_set(srgn->mctx->ppn_dirty, srgn_offset, + set_bit_len); + } else if (hpb->is_hcm) { + /* rewind the read timer for lru regions */ + rgn->read_timeout = ktime_add_ms(ktime_get(), + rgn->hpb->params.read_timeout_ms); + rgn->read_timeout_expiries = + rgn->hpb->params.read_timeout_expiries; + } + } + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (hpb->is_hcm && prev_srgn != srgn) { + bool activate = false; + + spin_lock(&rgn->rgn_lock); + if (set_dirty) { + rgn->reads -= srgn->reads; + srgn->reads = 0; + set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + } else { + srgn->reads++; + rgn->reads++; + if (srgn->reads == hpb->params.activation_thld) + activate = true; + } + spin_unlock(&rgn->rgn_lock); + + if (activate || + test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_active_info(hpb, rgn_idx, srgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate region %d-%d\n", rgn_idx, srgn_idx); + } + + prev_srgn = srgn; + } + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= set_bit_len; + if (cnt > 0) + goto next_srgn; +} + +static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx, int srgn_offset, int cnt) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int bitmap_len; + int bit_len; + +next_srgn: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (likely(!srgn->is_last)) + bitmap_len = hpb->entries_per_srgn; + else + bitmap_len = hpb->last_srgn_entries; + + if (!ufshpb_is_valid_srgn(rgn, srgn)) + return true; + + /* + * If the region state is active, mctx must be allocated. + * In this case, check whether the region is evicted or + * mctx allcation fail. + */ + if (unlikely(!srgn->mctx)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + return true; + } + + if ((srgn_offset + cnt) > bitmap_len) + bit_len = bitmap_len - srgn_offset; + else + bit_len = cnt; + + if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset, + srgn_offset) < bit_len + srgn_offset) + return true; + + srgn_offset = 0; + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + + cnt -= bit_len; + if (cnt > 0) + goto next_srgn; + + return false; +} + +static inline bool is_rgn_dirty(struct ufshpb_region *rgn) +{ + return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); +} + +static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx, int pos, + int len, __be64 *ppn_buf) +{ + struct page *page; + int index, offset; + int copied; + + index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE); + offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE); + + if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE)) + copied = len; + else + copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset; + + page = mctx->m_page[index]; + if (unlikely(!page)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "error. 
cannot find page in mctx\n"); + return -ENOMEM; + } + + memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE), + copied * HPB_ENTRY_SIZE); + + return copied; +} + +static void +ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx, + int *srgn_idx, int *offset) +{ + int rgn_offset; + + *rgn_idx = lpn >> hpb->entries_per_rgn_shift; + rgn_offset = lpn & hpb->entries_per_rgn_mask; + *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift; + *offset = rgn_offset & hpb->entries_per_srgn_mask; +} + +static void +ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb, + struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn, + u8 transfer_len, int read_id) +{ + unsigned char *cdb = lrbp->cmd->cmnd; + __be64 ppn_tmp = ppn; + cdb[0] = UFSHPB_READ; + + if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ) + ppn_tmp = swab64(ppn); + + /* ppn value is stored as big-endian in the host memory */ + memcpy(&cdb[6], &ppn_tmp, sizeof(__be64)); + cdb[14] = transfer_len; + cdb[15] = read_id; + + lrbp->cmd->cmd_len = UFS_CDB_SIZE; +} + +static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb, + unsigned long lpn, unsigned int len, + int read_id) +{ + cdb[0] = UFSHPB_WRITE_BUFFER; + cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID; + + put_unaligned_be32(lpn, &cdb[2]); + cdb[6] = read_id; + put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]); + + cdb[9] = 0x00; /* Control = 0x00 */ +} + +static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req; + + if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) { + dev_info(&hpb->sdev_ufs_lu->sdev_dev, + "pre_req throttle. inflight %d throttle %d", + hpb->num_inflight_pre_req, hpb->throttle_pre_req); + return NULL; + } + + pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free, + struct ufshpb_req, list_req); + if (!pre_req) { + dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req"); + return NULL; + } + + list_del_init(&pre_req->list_req); + hpb->num_inflight_pre_req++; + + return pre_req; +} + +static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb, + struct ufshpb_req *pre_req) +{ + pre_req->req = NULL; + bio_reset(pre_req->bio); + list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); + hpb->num_inflight_pre_req--; +} + +static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data; + struct ufshpb_lu *hpb = pre_req->hpb; + unsigned long flags; + + if (error) { + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req); + struct scsi_sense_hdr sshdr; + + dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error); + scsi_command_normalize_sense(cmd, &sshdr); + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "code %x sense_key %x asc %x ascq %x", + sshdr.response_code, + sshdr.sense_key, sshdr.asc, sshdr.ascq); + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "byte4 %x byte5 %x byte6 %x additional_len %x", + sshdr.byte4, sshdr.byte5, + sshdr.byte6, sshdr.additional_length); + } + + blk_mq_free_request(req); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_put_pre_req(pre_req->hpb, pre_req); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); +} + +static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page) +{ + struct ufshpb_lu *hpb = pre_req->hpb; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + __be64 *addr; + int offset = 0; + int copied; + unsigned long lpn = pre_req->wb.lpn; + int rgn_idx, srgn_idx, srgn_offset; + unsigned long flags; + + addr = 
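/*
 * [Worked example, not taken from this patch] ufshpb_get_pos_from_lpn() above
 * is plain shift/mask arithmetic because the entries per region and per
 * sub-region are powers of two. Purely illustrative geometry: a 16 MiB region
 * and 4 MiB sub-region with 4 KiB blocks give 4096 entries/region and
 * 1024 entries/sub-region, i.e. shifts of 12 and 10. For lpn = 1000000:
 *
 *   rgn_idx  = 1000000 >> 12   = 244
 *   rgn_off  = 1000000 & 4095  = 576
 *   srgn_idx = 576 >> 10       = 0
 *   offset   = 576 & 1023      = 576
 */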
page_address(page); + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + +next_offset: + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + if (!ufshpb_is_valid_srgn(rgn, srgn)) + goto mctx_error; + + if (!srgn->mctx) + goto mctx_error; + + copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, + pre_req->wb.len - offset, + &addr[offset]); + + if (copied < 0) + goto mctx_error; + + offset += copied; + srgn_offset += copied; + + if (srgn_offset == hpb->entries_per_srgn) { + srgn_offset = 0; + + if (++srgn_idx == hpb->srgns_per_rgn) { + srgn_idx = 0; + rgn_idx++; + } + } + + if (offset < pre_req->wb.len) + goto next_offset; + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return 0; +mctx_error: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return -ENOMEM; +} + +static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb, + struct request_queue *q, + struct ufshpb_req *pre_req) +{ + struct page *page = pre_req->wb.m_page; + struct bio *bio = pre_req->bio; + int entries_bytes, ret; + + if (!page) + return -ENOMEM; + + if (ufshpb_prep_entry(pre_req, page)) + return -ENOMEM; + + entries_bytes = pre_req->wb.len * sizeof(__be64); + + ret = bio_add_pc_page(q, bio, page, entries_bytes, 0); + if (ret != entries_bytes) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "bio_add_pc_page fail: %d", ret); + return -ENOMEM; + } + return 0; +} + +static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb) +{ + if (++hpb->cur_read_id >= MAX_HPB_READ_ID) + hpb->cur_read_id = 1; + return hpb->cur_read_id; +} + +static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, + struct ufshpb_req *pre_req, int read_id) +{ + struct scsi_device *sdev = cmd->device; + struct request_queue *q = sdev->request_queue; + struct request *req; + struct scsi_request *rq; + struct bio *bio = pre_req->bio; + + pre_req->hpb = hpb; + pre_req->wb.lpn = sectors_to_logical(cmd->device, + blk_rq_pos(scsi_cmd_to_rq(cmd))); + pre_req->wb.len = sectors_to_logical(cmd->device, + blk_rq_sectors(scsi_cmd_to_rq(cmd))); + if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req)) + return -ENOMEM; + + req = pre_req->req; + + /* 1. request setup */ + blk_rq_append_bio(req, bio); + req->rq_disk = NULL; + req->end_io_data = (void *)pre_req; + req->end_io = ufshpb_pre_req_compl_fn; + + /* 2. 
scsi_request setup */ + rq = scsi_req(req); + rq->retries = 1; + + ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len, + read_id); + rq->cmd_len = scsi_command_size(rq->cmd); + + if (blk_insert_cloned_request(q, req) != BLK_STS_OK) + return -EAGAIN; + + hpb->stats.pre_req_cnt++; + + return 0; +} + +static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd, + int *read_id) +{ + struct ufshpb_req *pre_req; + struct request *req = NULL; + unsigned long flags; + int _read_id; + int ret = 0; + + req = blk_get_request(cmd->device->request_queue, + REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT); + if (IS_ERR(req)) + return -EAGAIN; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + pre_req = ufshpb_get_pre_req(hpb); + if (!pre_req) { + ret = -EAGAIN; + goto unlock_out; + } + _read_id = ufshpb_get_read_id(hpb); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + pre_req->req = req; + + ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id); + if (ret) + goto free_pre_req; + + *read_id = _read_id; + + return ret; +free_pre_req: + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_put_pre_req(hpb, pre_req); +unlock_out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + blk_put_request(req); + return ret; +} + +/* + * This function will set up HPB read command using host-side L2P map data. + */ +int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb; + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + struct scsi_cmnd *cmd = lrbp->cmd; + u32 lpn; + __be64 ppn; + unsigned long flags; + int transfer_len, rgn_idx, srgn_idx, srgn_offset; + int read_id = 0; + int err = 0; + + hpb = ufshpb_get_hpb_data(cmd->device); + if (!hpb) + return -ENODEV; + + if (ufshpb_get_state(hpb) == HPB_INIT) + return -ENODEV; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT", __func__); + return -ENODEV; + } + + if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) || + (!ufshpb_is_write_or_discard(cmd) && + !ufshpb_is_read_cmd(cmd))) + return 0; + + transfer_len = sectors_to_logical(cmd->device, + blk_rq_sectors(scsi_cmd_to_rq(cmd))); + if (unlikely(!transfer_len)) + return 0; + + lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd))); + ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset); + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + /* If command type is WRITE or DISCARD, set bitmap as drity */ + if (ufshpb_is_write_or_discard(cmd)) { + ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len, true); + return 0; + } + + if (!ufshpb_is_supported_chunk(hpb, transfer_len)) + return 0; + + WARN_ON_ONCE(transfer_len > HPB_MULTI_CHUNK_HIGH); + + if (hpb->is_hcm) { + /* + * in host control mode, reads are the main source for + * activation trials. 
+ */ + ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len, false); + + /* keep those counters normalized */ + if (rgn->reads > hpb->entries_per_srgn) + schedule_work(&hpb->ufshpb_normalization_work); + } + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset, + transfer_len)) { + hpb->stats.miss_cnt++; + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return 0; + } + + err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + if (unlikely(err < 0)) { + /* + * In this case, the region state is active, + * but the ppn table is not allocated. + * Make sure that ppn table must be allocated on + * active state. + */ + dev_err(hba->dev, "get ppn failed. err %d\n", err); + return err; + } + if (!ufshpb_is_legacy(hba) && + ufshpb_is_required_wb(hpb, transfer_len)) { + err = ufshpb_issue_pre_req(hpb, cmd, &read_id); + if (err) { + unsigned long timeout; + + timeout = cmd->jiffies_at_alloc + msecs_to_jiffies( + hpb->params.requeue_timeout_ms); + + if (time_before(jiffies, timeout)) + return -EAGAIN; + + hpb->stats.miss_cnt++; + return 0; + } + } + + ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len, + read_id); + + hpb->stats.hit_cnt++; + return 0; +} + +static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, + int rgn_idx, enum req_opf dir, + bool atomic) +{ + struct ufshpb_req *rq; + struct request *req; + int retries = HPB_MAP_REQ_RETRIES; + + rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL); + if (!rq) + return NULL; + +retry: + req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir, + BLK_MQ_REQ_NOWAIT); + + if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) { + usleep_range(3000, 3100); + goto retry; + } + + if (IS_ERR(req)) + goto free_rq; + + rq->hpb = hpb; + rq->req = req; + rq->rb.rgn_idx = rgn_idx; + + return rq; + +free_rq: + kmem_cache_free(hpb->map_req_cache, rq); + return NULL; +} + +static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq) +{ + blk_put_request(rq->req); + kmem_cache_free(hpb->map_req_cache, rq); +} + +static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + struct bio *bio; + unsigned long flags; + + if (hpb->is_hcm && + hpb->num_inflight_map_req >= hpb->params.inflight_map_req) { + dev_info(&hpb->sdev_ufs_lu->sdev_dev, + "map_req throttle. 
inflight %d throttle %d", + hpb->num_inflight_map_req, + hpb->params.inflight_map_req); + return NULL; + } + + map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false); + if (!map_req) + return NULL; + + bio = bio_alloc(GFP_KERNEL, hpb->pages_per_srgn); + if (!bio) { + ufshpb_put_req(hpb, map_req); + return NULL; + } + + map_req->bio = bio; + + map_req->rb.srgn_idx = srgn->srgn_idx; + map_req->rb.mctx = srgn->mctx; + + spin_lock_irqsave(&hpb->param_lock, flags); + hpb->num_inflight_map_req++; + spin_unlock_irqrestore(&hpb->param_lock, flags); + + return map_req; +} + +static void ufshpb_put_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req) +{ + unsigned long flags; + + bio_put(map_req->bio); + ufshpb_put_req(hpb, map_req); + + spin_lock_irqsave(&hpb->param_lock, flags); + hpb->num_inflight_map_req--; + spin_unlock_irqrestore(&hpb->param_lock, flags); +} + +static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + u32 num_entries = hpb->entries_per_srgn; + + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + return -1; + } + + if (unlikely(srgn->is_last)) + num_entries = hpb->last_srgn_entries; + + bitmap_zero(srgn->mctx->ppn_dirty, num_entries); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags); + + return 0; +} + +static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx, + int srgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + + rgn = hpb->rgn_tbl + rgn_idx; + srgn = rgn->srgn_tbl + srgn_idx; + + list_del_init(&rgn->list_inact_rgn); + + if (list_empty(&srgn->list_act_srgn)) + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + + hpb->stats.rb_active_cnt++; +} + +static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int srgn_idx; + + rgn = hpb->rgn_tbl + rgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + list_del_init(&srgn->list_act_srgn); + + if (list_empty(&rgn->list_inact_rgn)) + list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn); + + hpb->stats.rb_inactive_cnt++; +} + +static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_region *rgn; + + /* + * If there is no mctx in subregion + * after I/O progress for HPB_READ_BUFFER, the region to which the + * subregion belongs was evicted. 
+ * Make sure the region must not evict in I/O progress + */ + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "no mctx in region %d subregion %d.\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + + if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "region %d subregion %d evicted\n", + srgn->rgn_idx, srgn->srgn_idx); + srgn->srgn_state = HPB_SRGN_INVALID; + return; + } + srgn->srgn_state = HPB_SRGN_VALID; +} + +static void ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data; + + ufshpb_put_req(umap_req->hpb, umap_req); +} + +static void ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) +{ + struct ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data; + struct ufshpb_lu *hpb = map_req->hpb; + struct ufshpb_subregion *srgn; + unsigned long flags; + + srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl + + map_req->rb.srgn_idx; + + ufshpb_clear_dirty_bitmap(hpb, srgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + ufshpb_activate_subregion(hpb, srgn); + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + ufshpb_put_map_req(map_req->hpb, map_req); +} + +static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn) +{ + cdb[0] = UFSHPB_WRITE_BUFFER; + cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID : + UFSHPB_WRITE_BUFFER_INACT_ALL_ID; + if (rgn) + put_unaligned_be16(rgn->rgn_idx, &cdb[2]); + cdb[9] = 0x00; +} + +static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx, + int srgn_idx, int srgn_mem_size) +{ + cdb[0] = UFSHPB_READ_BUFFER; + cdb[1] = UFSHPB_READ_BUFFER_ID; + + put_unaligned_be16(rgn_idx, &cdb[2]); + put_unaligned_be16(srgn_idx, &cdb[4]); + put_unaligned_be24(srgn_mem_size, &cdb[6]); + + cdb[9] = 0x00; +} + +static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_req *umap_req, + struct ufshpb_region *rgn) +{ + struct request *req; + struct scsi_request *rq; + + req = umap_req->req; + req->timeout = 0; + req->end_io_data = (void *)umap_req; + rq = scsi_req(req); + ufshpb_set_unmap_cmd(rq->cmd, rgn); + rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn); + + hpb->stats.umap_req_cnt++; +} + +static int ufshpb_execute_map_req(struct ufshpb_lu *hpb, + struct ufshpb_req *map_req, bool last) +{ + struct request_queue *q; + struct request *req; + struct scsi_request *rq; + int mem_size = hpb->srgn_mem_size; + int ret = 0; + int i; + + q = hpb->sdev_ufs_lu->request_queue; + for (i = 0; i < hpb->pages_per_srgn; i++) { + ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i], + PAGE_SIZE, 0); + if (ret != PAGE_SIZE) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "bio_add_pc_page fail %d - %d\n", + map_req->rb.rgn_idx, map_req->rb.srgn_idx); + return ret; + } + } + + req = map_req->req; + + blk_rq_append_bio(req, map_req->bio); + + req->end_io_data = map_req; + + rq = scsi_req(req); + + if (unlikely(last)) + mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE; + + ufshpb_set_read_buf_cmd(rq->cmd, map_req->rb.rgn_idx, + map_req->rb.srgn_idx, mem_size); + rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH; + + blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn); + + hpb->stats.map_req_cnt++; + return 0; +} + +static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb, + bool last) +{ + struct ufshpb_map_ctx *mctx; + 
u32 num_entries = hpb->entries_per_srgn; + int i, j; + + mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL); + if (!mctx) + return NULL; + + mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL); + if (!mctx->m_page) + goto release_mctx; + + if (unlikely(last)) + num_entries = hpb->last_srgn_entries; + + mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL); + if (!mctx->ppn_dirty) + goto release_m_page; + + for (i = 0; i < hpb->pages_per_srgn; i++) { + mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL); + if (!mctx->m_page[i]) { + for (j = 0; j < i; j++) + mempool_free(mctx->m_page[j], ufshpb_page_pool); + goto release_ppn_dirty; + } + clear_page(page_address(mctx->m_page[i])); + } + + return mctx; + +release_ppn_dirty: + bitmap_free(mctx->ppn_dirty); +release_m_page: + kmem_cache_free(hpb->m_page_cache, mctx->m_page); +release_mctx: + mempool_free(mctx, ufshpb_mctx_pool); + return NULL; +} + +static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb, + struct ufshpb_map_ctx *mctx) +{ + int i; + + for (i = 0; i < hpb->pages_per_srgn; i++) + mempool_free(mctx->m_page[i], ufshpb_page_pool); + + bitmap_free(mctx->ppn_dirty); + kmem_cache_free(hpb->m_page_cache, mctx->m_page); + mempool_free(mctx, ufshpb_mctx_pool); +} + +static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state == HPB_SRGN_ISSUED) + return -EPERM; + + return 0; +} + +static void ufshpb_read_to_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, + ufshpb_read_to_work.work); + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *next_rgn; + unsigned long flags; + unsigned int poll; + LIST_HEAD(expired_list); + + if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits)) + return; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn, + list_lru_rgn) { + bool timedout = ktime_after(ktime_get(), rgn->read_timeout); + + if (timedout) { + rgn->read_timeout_expiries--; + if (is_rgn_dirty(rgn) || + rgn->read_timeout_expiries == 0) + list_add(&rgn->list_expired_rgn, &expired_list); + else + rgn->read_timeout = ktime_add_ms(ktime_get(), + hpb->params.read_timeout_ms); + } + } + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + list_for_each_entry_safe(rgn, next_rgn, &expired_list, + list_expired_rgn) { + list_del_init(&rgn->list_expired_rgn); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + ufshpb_kick_map_work(hpb); + + clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits); + + poll = hpb->params.timeout_polling_interval_ms; + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(poll)); +} + +static void ufshpb_add_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + rgn->rgn_state = HPB_RGN_ACTIVE; + list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); + atomic_inc(&lru_info->active_cnt); + if (rgn->hpb->is_hcm) { + rgn->read_timeout = + ktime_add_ms(ktime_get(), + rgn->hpb->params.read_timeout_ms); + rgn->read_timeout_expiries = + rgn->hpb->params.read_timeout_expiries; + } +} + +static void ufshpb_hit_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn); +} + 
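The delayed work above implements the host-control-mode "cold region" policy: ufshpb_add_lru_info() stamps every newly activated region with a deadline (read_timeout) and an expiry budget (read_timeout_expiries), and ufshpb_read_to_handler() later either queues an expired region for inactivation (if it is dirty or its budget is used up) or simply re-arms the deadline. The following is a minimal, self-contained sketch of that decision, for illustration only; it is not part of the patch, and the struct and function names (rgn_timeout_state, rgn_read_timed_out) are invented for the example.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the per-region timeout fields used above. */
struct rgn_timeout_state {
	int64_t read_timeout;               /* deadline, ns since boot */
	unsigned int read_timeout_expiries; /* remaining allowed expiries */
	bool dirty;                         /* RGN_FLAG_DIRTY equivalent */
};

/*
 * Mirrors the decision made in ufshpb_read_to_handler(): returns true when
 * the region should be queued for inactivation, false when it stays active.
 * A region is only given up once it is dirty or has exhausted its expiry
 * budget; otherwise its deadline is pushed out by another read_timeout_ms.
 */
static bool rgn_read_timed_out(struct rgn_timeout_state *r, int64_t now_ns,
			       int64_t read_timeout_ms)
{
	if (now_ns <= r->read_timeout)
		return false;           /* still "warm", nothing to do */

	r->read_timeout_expiries--;
	if (r->dirty || r->read_timeout_expiries == 0)
		return true;            /* candidate for inactivation */

	/* re-arm: grant the region another read_timeout_ms of grace */
	r->read_timeout = now_ns + read_timeout_ms * 1000000;
	return false;
}

In the driver the same check runs over lru_info->lh_lru_rgn under rgn_state_lock; the regions flagged here are collected on a local expired list and only afterwards handed to ufshpb_update_inactive_info() under rsp_list_lock, after which the work kicks map_work and re-schedules itself every timeout_polling_interval_ms.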
+static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn, *victim_rgn = NULL; + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) { + if (!rgn) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: no region allocated\n", + __func__); + return NULL; + } + if (ufshpb_check_srgns_issue_state(hpb, rgn)) + continue; + + /* + * in host control mode, verify that the exiting region + * has fewer reads + */ + if (hpb->is_hcm && + rgn->reads > hpb->params.eviction_thld_exit) + continue; + + victim_rgn = rgn; + break; + } + + return victim_rgn; +} + +static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info, + struct ufshpb_region *rgn) +{ + list_del_init(&rgn->list_lru_rgn); + rgn->rgn_state = HPB_RGN_INACTIVE; + atomic_dec(&lru_info->active_cnt); +} + +static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb, + struct ufshpb_subregion *srgn) +{ + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + ufshpb_put_map_ctx(hpb, srgn->mctx); + srgn->srgn_state = HPB_SRGN_UNUSED; + srgn->mctx = NULL; + } +} + +static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + bool atomic) +{ + struct ufshpb_req *umap_req; + int rgn_idx = rgn ? rgn->rgn_idx : 0; + + umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic); + if (!umap_req) + return -ENOMEM; + + ufshpb_execute_umap_req(hpb, umap_req, rgn); + + return 0; +} + +static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + return ufshpb_issue_umap_req(hpb, rgn, true); +} + +static int ufshpb_issue_umap_all_req(struct ufshpb_lu *hpb) +{ + return ufshpb_issue_umap_req(hpb, NULL, false); +} + +static void __ufshpb_evict_region(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct victim_select_info *lru_info; + struct ufshpb_subregion *srgn; + int srgn_idx; + + lru_info = &hpb->lru_info; + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx); + + ufshpb_cleanup_lru_info(lru_info, rgn); + + for_each_sub_region(rgn, srgn_idx, srgn) + ufshpb_purge_active_subregion(hpb, srgn); +} + +static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (rgn->rgn_state == HPB_RGN_PINNED) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "pinned region cannot drop-out. 
region %d\n", + rgn->rgn_idx); + goto out; + } + + if (!list_empty(&rgn->list_lru_rgn)) { + if (ufshpb_check_srgns_issue_state(hpb, rgn)) { + ret = -EBUSY; + goto out; + } + + if (hpb->is_hcm) { + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + ret = ufshpb_issue_umap_single_req(hpb, rgn); + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + if (ret) + goto out; + } + + __ufshpb_evict_region(hpb, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +static int ufshpb_issue_map_req(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + struct ufshpb_req *map_req; + unsigned long flags; + int ret; + int err = -EAGAIN; + bool alloc_required = false; + enum HPB_SRGN_STATE state = HPB_SRGN_INVALID; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + goto unlock_out; + } + + if ((rgn->rgn_state == HPB_RGN_INACTIVE) && + (srgn->srgn_state == HPB_SRGN_INVALID)) { + err = 0; + goto unlock_out; + } + + if (srgn->srgn_state == HPB_SRGN_UNUSED) + alloc_required = true; + + /* + * If the subregion is already ISSUED state, + * a specific event (e.g., GC or wear-leveling, etc.) occurs in + * the device and HPB response for map loading is received. + * In this case, after finishing the HPB_READ_BUFFER, + * the next HPB_READ_BUFFER is performed again to obtain the latest + * map data. + */ + if (srgn->srgn_state == HPB_SRGN_ISSUED) + goto unlock_out; + + srgn->srgn_state = HPB_SRGN_ISSUED; + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + + if (alloc_required) { + srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); + if (!srgn->mctx) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "get map_ctx failed. region %d - %d\n", + rgn->rgn_idx, srgn->srgn_idx); + state = HPB_SRGN_UNUSED; + goto change_srgn_state; + } + } + + map_req = ufshpb_get_map_req(hpb, srgn); + if (!map_req) + goto change_srgn_state; + + + ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "%s: issue map_req failed: %d, region %d - %d\n", + __func__, ret, srgn->rgn_idx, srgn->srgn_idx); + goto free_map_req; + } + return 0; + +free_map_req: + ufshpb_put_map_req(hpb, map_req); +change_srgn_state: + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + srgn->srgn_state = state; +unlock_out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return err; +} + +static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn) +{ + struct ufshpb_region *victim_rgn = NULL; + struct victim_select_info *lru_info = &hpb->lru_info; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + /* + * If region belongs to lru_list, just move the region + * to the front of lru list because the state of the region + * is already active-state. + */ + if (!list_empty(&rgn->list_lru_rgn)) { + ufshpb_hit_lru_info(lru_info, rgn); + goto out; + } + + if (rgn->rgn_state == HPB_RGN_INACTIVE) { + if (atomic_read(&lru_info->active_cnt) == + lru_info->max_lru_active_cnt) { + /* + * If the maximum number of active regions + * is exceeded, evict the least recently used region. + * This case may occur when the device responds + * to the eviction information late. 
+ * It is okay to evict the least recently used region, + * because the device could detect this region + * by not issuing HPB_READ + * + * in host control mode, verify that the entering + * region has enough reads + */ + if (hpb->is_hcm && + rgn->reads < hpb->params.eviction_thld_enter) { + ret = -EACCES; + goto out; + } + + victim_rgn = ufshpb_victim_lru_info(hpb); + if (!victim_rgn) { + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "cannot get victim region %s\n", + hpb->is_hcm ? "" : "error"); + ret = -ENOMEM; + goto out; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "LRU full (%d), choose victim %d\n", + atomic_read(&lru_info->active_cnt), + victim_rgn->rgn_idx); + + if (hpb->is_hcm) { + spin_unlock_irqrestore(&hpb->rgn_state_lock, + flags); + ret = ufshpb_issue_umap_single_req(hpb, + victim_rgn); + spin_lock_irqsave(&hpb->rgn_state_lock, + flags); + if (ret) + goto out; + } + + __ufshpb_evict_region(hpb, victim_rgn); + } + + /* + * When a region is added to lru_info list_head, + * it is guaranteed that the subregion has been + * assigned all mctx. If failed, try to receive mctx again + * without being added to lru_info list_head + */ + ufshpb_add_lru_info(lru_info, rgn); + } +out: + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); + return ret; +} + +static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb, + struct utp_hpb_rsp *rsp_field) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + int i, rgn_i, srgn_i; + + BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE); + /* + * If the active region and the inactive region are the same, + * we will inactivate this region. + * The device could check this (region inactivated) and + * will response the proper active region information + */ + for (i = 0; i < rsp_field->active_rgn_cnt; i++) { + rgn_i = + be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn); + srgn_i = + be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn); + + rgn = hpb->rgn_tbl + rgn_i; + if (hpb->is_hcm && + (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) { + /* + * in host control mode, subregion activation + * recommendations are only allowed to active regions. 
+ * Also, ignore recommendations for dirty regions - the + * host will make decisions concerning those by himself + */ + continue; + } + + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "activate(%d) region %d - %d\n", i, rgn_i, srgn_i); + + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_active_info(hpb, rgn_i, srgn_i); + spin_unlock(&hpb->rsp_list_lock); + + srgn = rgn->srgn_tbl + srgn_i; + + /* blocking HPB_READ */ + spin_lock(&hpb->rgn_state_lock); + if (srgn->srgn_state == HPB_SRGN_VALID) + srgn->srgn_state = HPB_SRGN_INVALID; + spin_unlock(&hpb->rgn_state_lock); + } + + if (hpb->is_hcm) { + /* + * in host control mode the device is not allowed to inactivate + * regions + */ + goto out; + } + + for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) { + rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]); + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, + "inactivate(%d) region %d\n", i, rgn_i); + + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, rgn_i); + spin_unlock(&hpb->rsp_list_lock); + + rgn = hpb->rgn_tbl + rgn_i; + + spin_lock(&hpb->rgn_state_lock); + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + for (srgn_i = 0; srgn_i < rgn->srgn_cnt; srgn_i++) { + srgn = rgn->srgn_tbl + srgn_i; + if (srgn->srgn_state == HPB_SRGN_VALID) + srgn->srgn_state = HPB_SRGN_INVALID; + } + } + spin_unlock(&hpb->rgn_state_lock); + + } + +out: + dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n", + rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt); + + if (ufshpb_get_state(hpb) == HPB_PRESENT) + queue_work(ufshpb_wq, &hpb->map_work); +} + +static void ufshpb_dev_reset_handler(struct ufshpb_lu *hpb) +{ + struct victim_select_info *lru_info = &hpb->lru_info; + struct ufshpb_region *rgn; + unsigned long flags; + + spin_lock_irqsave(&hpb->rgn_state_lock, flags); + + list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) + set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags); + + spin_unlock_irqrestore(&hpb->rgn_state_lock, flags); +} + +/* + * This function will parse recommended active subregion information in sense + * data field of response UPIU with SAM_STAT_GOOD state. 
+ */ +void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) +{ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device); + struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr; + int data_seg_len; + + if (unlikely(lrbp->lun != rsp_field->lun)) { + struct scsi_device *sdev; + bool found = false; + + __shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb) + continue; + + if (rsp_field->lun == hpb->lun) { + found = true; + break; + } + } + + if (!found) + return; + } + + if (!hpb) + return; + + if (ufshpb_get_state(hpb) == HPB_INIT) + return; + + if ((ufshpb_get_state(hpb) != HPB_PRESENT) && + (ufshpb_get_state(hpb) != HPB_SUSPEND)) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT/SUSPEND\n", + __func__); + return; + } + + data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) + & MASK_RSP_UPIU_DATA_SEG_LEN; + + /* To flush remained rsp_list, we queue the map_work task */ + if (!data_seg_len) { + if (!ufshpb_is_general_lun(hpb->lun)) + return; + + ufshpb_kick_map_work(hpb); + return; + } + + BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE); + + if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field)) + return; + + hpb->stats.rb_noti_cnt++; + + switch (rsp_field->hpb_op) { + case HPB_RSP_REQ_REGION_UPDATE: + if (data_seg_len != DEV_DATA_SEG_LEN) + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "%s: data seg length is not same.\n", + __func__); + ufshpb_rsp_req_region_update(hpb, rsp_field); + break; + case HPB_RSP_DEV_RESET: + dev_warn(&hpb->sdev_ufs_lu->sdev_dev, + "UFS device lost HPB information during PM.\n"); + + if (hpb->is_hcm) { + struct scsi_device *sdev; + + __shost_for_each_device(sdev, hba->host) { + struct ufshpb_lu *h = sdev->hostdata; + + if (h) + ufshpb_dev_reset_handler(h); + } + } + + break; + default: + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "hpb_op is not available: %d\n", + rsp_field->hpb_op); + break; + } +} + +static void ufshpb_add_active_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct ufshpb_subregion *srgn) +{ + if (!list_empty(&rgn->list_inact_rgn)) + return; + + if (!list_empty(&srgn->list_act_srgn)) { + list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn); + return; + } + + list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn); +} + +static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, + struct list_head *pending_list) +{ + struct ufshpb_subregion *srgn; + int srgn_idx; + + if (!list_empty(&rgn->list_inact_rgn)) + return; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (!list_empty(&srgn->list_act_srgn)) + return; + + list_add_tail(&rgn->list_inact_rgn, pending_list); +} + +static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + struct ufshpb_subregion *srgn; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn, + struct ufshpb_subregion, + list_act_srgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + rgn = hpb->rgn_tbl + srgn->rgn_idx; + ret = ufshpb_add_region(hpb, rgn); + if (ret) + goto active_failed; + + ret = ufshpb_issue_map_req(hpb, rgn, srgn); + if (ret) { + dev_err(&hpb->sdev_ufs_lu->sdev_dev, + "issue map_req failed. 
ret %d, region %d - %d\n", + ret, rgn->rgn_idx, srgn->srgn_idx); + goto active_failed; + } + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + return; + +active_failed: + dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n", + rgn->rgn_idx, srgn->srgn_idx); + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_active_list(hpb, rgn, srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn; + unsigned long flags; + int ret; + LIST_HEAD(pending_list); + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn, + struct ufshpb_region, + list_inact_rgn))) { + if (ufshpb_get_state(hpb) == HPB_SUSPEND) + break; + + list_del_init(&rgn->list_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + + ret = ufshpb_evict_region(hpb, rgn); + if (ret) { + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + ufshpb_add_pending_evict_list(hpb, rgn, &pending_list); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); + } + + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + } + + list_splice(&pending_list, &hpb->lh_inact_rgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_normalization_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, + ufshpb_normalization_work); + int rgn_idx; + u8 factor = hpb->params.normalization_factor; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx; + int srgn_idx; + + spin_lock(&rgn->rgn_lock); + rgn->reads = 0; + for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) { + struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx; + + srgn->reads >>= factor; + rgn->reads += srgn->reads; + } + spin_unlock(&rgn->rgn_lock); + + if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads) + continue; + + /* if region is active but has no reads - inactivate it */ + spin_lock(&hpb->rsp_list_lock); + ufshpb_update_inactive_info(hpb, rgn->rgn_idx); + spin_unlock(&hpb->rsp_list_lock); + } +} + +static void ufshpb_map_work_handler(struct work_struct *work) +{ + struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work); + + if (ufshpb_get_state(hpb) != HPB_PRESENT) { + dev_notice(&hpb->sdev_ufs_lu->sdev_dev, + "%s: ufshpb state is not PRESENT\n", __func__); + return; + } + + ufshpb_run_inactive_region_list(hpb); + ufshpb_run_active_subregion_list(hpb); +} + +/* + * this function doesn't need to hold lock due to be called in init. + * (rgn_state_lock, rsp_list_lock, etc..) 
+ */ +static int ufshpb_init_pinned_active_region(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + struct ufshpb_subregion *srgn; + int srgn_idx, i; + int err = 0; + + for_each_sub_region(rgn, srgn_idx, srgn) { + srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last); + srgn->srgn_state = HPB_SRGN_INVALID; + if (!srgn->mctx) { + err = -ENOMEM; + dev_err(hba->dev, + "alloc mctx for pinned region failed\n"); + goto release; + } + + list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn); + } + + rgn->rgn_state = HPB_RGN_PINNED; + return 0; + +release: + for (i = 0; i < srgn_idx; i++) { + srgn = rgn->srgn_tbl + i; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } + return err; +} + +static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, bool last) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for_each_sub_region(rgn, srgn_idx, srgn) { + INIT_LIST_HEAD(&srgn->list_act_srgn); + + srgn->rgn_idx = rgn->rgn_idx; + srgn->srgn_idx = srgn_idx; + srgn->srgn_state = HPB_SRGN_UNUSED; + } + + if (unlikely(last && hpb->last_srgn_entries)) + srgn->is_last = true; +} + +static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn, int srgn_cnt) +{ + rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion), + GFP_KERNEL); + if (!rgn->srgn_tbl) + return -ENOMEM; + + rgn->srgn_cnt = srgn_cnt; + return 0; +} + +static void ufshpb_lu_parameter_init(struct ufs_hba *hba, + struct ufshpb_lu *hpb, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + u32 entries_per_rgn; + u64 rgn_mem_size, tmp; + + /* for pre_req */ + hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1; + + if (ufshpb_is_legacy(hba)) + hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH; + else + hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH; + + hpb->cur_read_id = 0; + + hpb->lu_pinned_start = hpb_lu_info->pinned_start; + hpb->lu_pinned_end = hpb_lu_info->num_pinned ? 
+ (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1) + : PINNED_NOT_SET; + hpb->lru_info.max_lru_active_cnt = + hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned; + + rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT + * HPB_ENTRY_SIZE; + do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE); + hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size) + * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE; + + tmp = rgn_mem_size; + do_div(tmp, HPB_ENTRY_SIZE); + entries_per_rgn = (u32)tmp; + hpb->entries_per_rgn_shift = ilog2(entries_per_rgn); + hpb->entries_per_rgn_mask = entries_per_rgn - 1; + + hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE; + hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn); + hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1; + + tmp = rgn_mem_size; + do_div(tmp, hpb->srgn_mem_size); + hpb->srgns_per_rgn = (int)tmp; + + hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + entries_per_rgn); + hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks, + (hpb->srgn_mem_size / HPB_ENTRY_SIZE)); + hpb->last_srgn_entries = hpb_lu_info->num_blocks + % (hpb->srgn_mem_size / HPB_ENTRY_SIZE); + + hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE); + + if (hpb_dev_info->control_mode == HPB_HOST_CONTROL) + hpb->is_hcm = true; +} + +static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn_table, *rgn; + int rgn_idx, i; + int ret = 0; + + rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region), + GFP_KERNEL); + if (!rgn_table) + return -ENOMEM; + + hpb->rgn_tbl = rgn_table; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + int srgn_cnt = hpb->srgns_per_rgn; + bool last_srgn = false; + + rgn = rgn_table + rgn_idx; + rgn->rgn_idx = rgn_idx; + + spin_lock_init(&rgn->rgn_lock); + + INIT_LIST_HEAD(&rgn->list_inact_rgn); + INIT_LIST_HEAD(&rgn->list_lru_rgn); + INIT_LIST_HEAD(&rgn->list_expired_rgn); + + if (rgn_idx == hpb->rgns_per_lu - 1) { + srgn_cnt = ((hpb->srgns_per_lu - 1) % + hpb->srgns_per_rgn) + 1; + last_srgn = true; + } + + ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt); + if (ret) + goto release_srgn_table; + ufshpb_init_subregion_tbl(hpb, rgn, last_srgn); + + if (ufshpb_is_pinned_region(hpb, rgn_idx)) { + ret = ufshpb_init_pinned_active_region(hba, hpb, rgn); + if (ret) + goto release_srgn_table; + } else { + rgn->rgn_state = HPB_RGN_INACTIVE; + } + + rgn->rgn_flags = 0; + rgn->hpb = hpb; + } + + return 0; + +release_srgn_table: + for (i = 0; i < rgn_idx; i++) + kvfree(rgn_table[i].srgn_tbl); + + kvfree(rgn_table); + return ret; +} + +static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb, + struct ufshpb_region *rgn) +{ + int srgn_idx; + struct ufshpb_subregion *srgn; + + for_each_sub_region(rgn, srgn_idx, srgn) + if (srgn->srgn_state != HPB_SRGN_UNUSED) { + srgn->srgn_state = HPB_SRGN_UNUSED; + ufshpb_put_map_ctx(hpb, srgn->mctx); + } +} + +static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb) +{ + int rgn_idx; + + for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) { + struct ufshpb_region *rgn; + + rgn = hpb->rgn_tbl + rgn_idx; + if (rgn->rgn_state != HPB_RGN_INACTIVE) { + rgn->rgn_state = HPB_RGN_INACTIVE; + + ufshpb_destroy_subregion_tbl(hpb, rgn); + } + + kvfree(rgn->srgn_tbl); + } + + kvfree(hpb->rgn_tbl); +} + +/* SYSFS functions */ +#define ufshpb_sysfs_attr_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct 
scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + if (!hpb) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \ +} \ +\ +static DEVICE_ATTR_RO(__name) + +ufshpb_sysfs_attr_show_func(hit_cnt); +ufshpb_sysfs_attr_show_func(miss_cnt); +ufshpb_sysfs_attr_show_func(rb_noti_cnt); +ufshpb_sysfs_attr_show_func(rb_active_cnt); +ufshpb_sysfs_attr_show_func(rb_inactive_cnt); +ufshpb_sysfs_attr_show_func(map_req_cnt); +ufshpb_sysfs_attr_show_func(umap_req_cnt); + +static struct attribute *hpb_dev_stat_attrs[] = { + &dev_attr_hit_cnt.attr, + &dev_attr_miss_cnt.attr, + &dev_attr_rb_noti_cnt.attr, + &dev_attr_rb_active_cnt.attr, + &dev_attr_rb_inactive_cnt.attr, + &dev_attr_map_req_cnt.attr, + &dev_attr_umap_req_cnt.attr, + NULL, +}; + +struct attribute_group ufs_sysfs_hpb_stat_group = { + .name = "hpb_stats", + .attrs = hpb_dev_stat_attrs, +}; + +/* SYSFS functions */ +#define ufshpb_sysfs_param_show_func(__name) \ +static ssize_t __name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct scsi_device *sdev = to_scsi_device(dev); \ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \ + \ + if (!hpb) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%d\n", hpb->params.__name); \ +} + +ufshpb_sysfs_param_show_func(requeue_timeout_ms); +static ssize_t +requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val < 0) + return -EINVAL; + + hpb->params.requeue_timeout_ms = val; + + return count; +} +static DEVICE_ATTR_RW(requeue_timeout_ms); + +ufshpb_sysfs_param_show_func(activation_thld); +static ssize_t +activation_thld_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0) + return -EINVAL; + + hpb->params.activation_thld = val; + + return count; +} +static DEVICE_ATTR_RW(activation_thld); + +ufshpb_sysfs_param_show_func(normalization_factor); +static ssize_t +normalization_factor_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0 || val > ilog2(hpb->entries_per_srgn)) + return -EINVAL; + + hpb->params.normalization_factor = val; + + return count; +} +static DEVICE_ATTR_RW(normalization_factor); + +ufshpb_sysfs_param_show_func(eviction_thld_enter); +static ssize_t +eviction_thld_enter_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= hpb->params.eviction_thld_exit) + return -EINVAL; + + hpb->params.eviction_thld_enter = val; + + return count; 
+} +static DEVICE_ATTR_RW(eviction_thld_enter); + +ufshpb_sysfs_param_show_func(eviction_thld_exit); +static ssize_t +eviction_thld_exit_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= hpb->params.activation_thld) + return -EINVAL; + + hpb->params.eviction_thld_exit = val; + + return count; +} +static DEVICE_ATTR_RW(eviction_thld_exit); + +ufshpb_sysfs_param_show_func(read_timeout_ms); +static ssize_t +read_timeout_ms_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + /* read_timeout >> timeout_polling_interval */ + if (val < hpb->params.timeout_polling_interval_ms * 2) + return -EINVAL; + + hpb->params.read_timeout_ms = val; + + return count; +} +static DEVICE_ATTR_RW(read_timeout_ms); + +ufshpb_sysfs_param_show_func(read_timeout_expiries); +static ssize_t +read_timeout_expiries_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0) + return -EINVAL; + + hpb->params.read_timeout_expiries = val; + + return count; +} +static DEVICE_ATTR_RW(read_timeout_expiries); + +ufshpb_sysfs_param_show_func(timeout_polling_interval_ms); +static ssize_t +timeout_polling_interval_ms_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + /* timeout_polling_interval << read_timeout */ + if (val <= 0 || val > hpb->params.read_timeout_ms / 2) + return -EINVAL; + + hpb->params.timeout_polling_interval_ms = val; + + return count; +} +static DEVICE_ATTR_RW(timeout_polling_interval_ms); + +ufshpb_sysfs_param_show_func(inflight_map_req); +static ssize_t inflight_map_req_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + int val; + + if (!hpb) + return -ENODEV; + + if (!hpb->is_hcm) + return -EOPNOTSUPP; + + if (kstrtouint(buf, 0, &val)) + return -EINVAL; + + if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1) + return -EINVAL; + + hpb->params.inflight_map_req = val; + + return count; +} +static DEVICE_ATTR_RW(inflight_map_req); + +static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb) +{ + hpb->params.activation_thld = ACTIVATION_THRESHOLD; + hpb->params.normalization_factor = 1; + hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5); + hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4); + hpb->params.read_timeout_ms = READ_TO_MS; + hpb->params.read_timeout_expiries = READ_TO_EXPIRIES; + 
hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS; + hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT; +} + +static struct attribute *hpb_dev_param_attrs[] = { + &dev_attr_requeue_timeout_ms.attr, + &dev_attr_activation_thld.attr, + &dev_attr_normalization_factor.attr, + &dev_attr_eviction_thld_enter.attr, + &dev_attr_eviction_thld_exit.attr, + &dev_attr_read_timeout_ms.attr, + &dev_attr_read_timeout_expiries.attr, + &dev_attr_timeout_polling_interval_ms.attr, + &dev_attr_inflight_map_req.attr, + NULL, +}; + +struct attribute_group ufs_sysfs_hpb_param_group = { + .name = "hpb_params", + .attrs = hpb_dev_param_attrs, +}; + +static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req = NULL, *t; + int qd = hpb->sdev_ufs_lu->queue_depth / 2; + int i; + + INIT_LIST_HEAD(&hpb->lh_pre_req_free); + + hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL); + hpb->throttle_pre_req = qd; + hpb->num_inflight_pre_req = 0; + + if (!hpb->pre_req) + goto release_mem; + + for (i = 0; i < qd; i++) { + pre_req = hpb->pre_req + i; + INIT_LIST_HEAD(&pre_req->list_req); + pre_req->req = NULL; + + pre_req->bio = bio_alloc(GFP_KERNEL, 1); + if (!pre_req->bio) + goto release_mem; + + pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!pre_req->wb.m_page) { + bio_put(pre_req->bio); + goto release_mem; + } + + list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free); + } + + return 0; +release_mem: + list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) { + list_del_init(&pre_req->list_req); + bio_put(pre_req->bio); + __free_page(pre_req->wb.m_page); + } + + kfree(hpb->pre_req); + return -ENOMEM; +} + +static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb) +{ + struct ufshpb_req *pre_req = NULL; + int i; + + for (i = 0; i < hpb->throttle_pre_req; i++) { + pre_req = hpb->pre_req + i; + bio_put(hpb->pre_req[i].bio); + if (!pre_req->wb.m_page) + __free_page(hpb->pre_req[i].wb.m_page); + list_del_init(&pre_req->list_req); + } + + kfree(hpb->pre_req); +} + +static void ufshpb_stat_init(struct ufshpb_lu *hpb) +{ + hpb->stats.hit_cnt = 0; + hpb->stats.miss_cnt = 0; + hpb->stats.rb_noti_cnt = 0; + hpb->stats.rb_active_cnt = 0; + hpb->stats.rb_inactive_cnt = 0; + hpb->stats.map_req_cnt = 0; + hpb->stats.umap_req_cnt = 0; +} + +static void ufshpb_param_init(struct ufshpb_lu *hpb) +{ + hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS; + if (hpb->is_hcm) + ufshpb_hcm_param_init(hpb); +} + +static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb) +{ + int ret; + + spin_lock_init(&hpb->rgn_state_lock); + spin_lock_init(&hpb->rsp_list_lock); + spin_lock_init(&hpb->param_lock); + + INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn); + INIT_LIST_HEAD(&hpb->lh_act_srgn); + INIT_LIST_HEAD(&hpb->lh_inact_rgn); + INIT_LIST_HEAD(&hpb->list_hpb_lu); + + INIT_WORK(&hpb->map_work, ufshpb_map_work_handler); + if (hpb->is_hcm) { + INIT_WORK(&hpb->ufshpb_normalization_work, + ufshpb_normalization_work_handler); + INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work, + ufshpb_read_to_handler); + } + + hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache", + sizeof(struct ufshpb_req), 0, 0, NULL); + if (!hpb->map_req_cache) { + dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail", + hpb->lun); + return -ENOMEM; + } + + hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache", + sizeof(struct page *) * hpb->pages_per_srgn, + 0, 0, NULL); + if (!hpb->m_page_cache) { + dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create 
fail", + hpb->lun); + ret = -ENOMEM; + goto release_req_cache; + } + + ret = ufshpb_pre_req_mempool_init(hpb); + if (ret) { + dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail", + hpb->lun); + goto release_m_page_cache; + } + + ret = ufshpb_alloc_region_tbl(hba, hpb); + if (ret) + goto release_pre_req_mempool; + + ufshpb_stat_init(hpb); + ufshpb_param_init(hpb); + + if (hpb->is_hcm) { + unsigned int poll; + + poll = hpb->params.timeout_polling_interval_ms; + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(poll)); + } + + return 0; + +release_pre_req_mempool: + ufshpb_pre_req_mempool_destroy(hpb); +release_m_page_cache: + kmem_cache_destroy(hpb->m_page_cache); +release_req_cache: + kmem_cache_destroy(hpb->map_req_cache); + return ret; +} + +static struct ufshpb_lu * +ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev, + struct ufshpb_dev_info *hpb_dev_info, + struct ufshpb_lu_info *hpb_lu_info) +{ + struct ufshpb_lu *hpb; + int ret; + + hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL); + if (!hpb) + return NULL; + + hpb->lun = sdev->lun; + hpb->sdev_ufs_lu = sdev; + + ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info); + + ret = ufshpb_lu_hpb_init(hba, hpb); + if (ret) { + dev_err(hba->dev, "hpb lu init failed. ret %d", ret); + goto release_hpb; + } + + sdev->hostdata = hpb; + return hpb; + +release_hpb: + kfree(hpb); + return NULL; +} + +static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb) +{ + struct ufshpb_region *rgn, *next_rgn; + struct ufshpb_subregion *srgn, *next_srgn; + unsigned long flags; + + /* + * If the device reset occurred, the remaining HPB region information + * may be stale. Therefore, by discarding the lists of HPB response + * that remained after reset, we prevent unnecessary work. 
+ */ + spin_lock_irqsave(&hpb->rsp_list_lock, flags); + list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn, + list_inact_rgn) + list_del_init(&rgn->list_inact_rgn); + + list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn, + list_act_srgn) + list_del_init(&srgn->list_act_srgn); + spin_unlock_irqrestore(&hpb->rsp_list_lock, flags); +} + +static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb) +{ + if (hpb->is_hcm) { + cancel_delayed_work_sync(&hpb->ufshpb_read_to_work); + cancel_work_sync(&hpb->ufshpb_normalization_work); + } + cancel_work_sync(&hpb->map_work); +} + +static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba) +{ + int err = 0; + bool flag_res = true; + int try; + + /* wait for the device to complete HPB reset query */ + for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { + dev_dbg(hba->dev, + "%s start flag reset polling %d times\n", + __func__, try); + + /* Poll fHpbReset flag to be cleared */ + err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res); + + if (err) { + dev_err(hba->dev, + "%s reading fHpbReset flag failed with error %d\n", + __func__, err); + return flag_res; + } + + if (!flag_res) + goto out; + + usleep_range(1000, 1100); + } + if (flag_res) { + dev_err(hba->dev, + "%s fHpbReset was not cleared by the device\n", + __func__); + } +out: + return flag_res; +} + +void ufshpb_reset(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if (ufshpb_get_state(hpb) != HPB_RESET) + continue; + + ufshpb_set_state(hpb, HPB_PRESENT); + } +} + +void ufshpb_reset_host(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) + continue; + ufshpb_set_state(hpb, HPB_RESET); + ufshpb_cancel_jobs(hpb); + ufshpb_discard_rsp_lists(hpb); + } +} + +void ufshpb_suspend(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if (ufshpb_get_state(hpb) != HPB_PRESENT) + continue; + ufshpb_set_state(hpb, HPB_SUSPEND); + ufshpb_cancel_jobs(hpb); + } +} + +void ufshpb_resume(struct ufs_hba *hba) +{ + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if ((ufshpb_get_state(hpb) != HPB_PRESENT) && + (ufshpb_get_state(hpb) != HPB_SUSPEND)) + continue; + ufshpb_set_state(hpb, HPB_PRESENT); + ufshpb_kick_map_work(hpb); + if (hpb->is_hcm) { + unsigned int poll = + hpb->params.timeout_polling_interval_ms; + + schedule_delayed_work(&hpb->ufshpb_read_to_work, + msecs_to_jiffies(poll)); + } + } +} + +static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun, + struct ufshpb_lu_info *hpb_lu_info) +{ + u16 max_active_rgns; + u8 lu_enable; + int size; + int ret; + char desc_buf[QUERY_DESC_MAX_SIZE]; + + ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size); + + pm_runtime_get_sync(hba->dev); + ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, + QUERY_DESC_IDN_UNIT, lun, 0, + desc_buf, &size); + pm_runtime_put_sync(hba->dev); + + if (ret) { + dev_err(hba->dev, + "%s: idn: %d lun: %d query request failed", + __func__, QUERY_DESC_IDN_UNIT, lun); + return ret; + } + + lu_enable = 
desc_buf[UNIT_DESC_PARAM_LU_ENABLE]; + if (lu_enable != LU_ENABLED_HPB_FUNC) + return -ENODEV; + + max_active_rgns = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS); + if (!max_active_rgns) { + dev_err(hba->dev, + "lun %d wrong number of max active regions\n", lun); + return -ENODEV; + } + + hpb_lu_info->num_blocks = get_unaligned_be64( + desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT); + hpb_lu_info->pinned_start = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF); + hpb_lu_info->num_pinned = get_unaligned_be16( + desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS); + hpb_lu_info->max_active_rgns = max_active_rgns; + + return 0; +} + +void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); + + if (!hpb) + return; + + ufshpb_set_state(hpb, HPB_FAILED); + + sdev = hpb->sdev_ufs_lu; + sdev->hostdata = NULL; + + ufshpb_cancel_jobs(hpb); + + ufshpb_pre_req_mempool_destroy(hpb); + ufshpb_destroy_region_tbl(hpb); + + kmem_cache_destroy(hpb->map_req_cache); + kmem_cache_destroy(hpb->m_page_cache); + + list_del_init(&hpb->list_hpb_lu); + + kfree(hpb); +} + +static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba) +{ + int pool_size; + struct ufshpb_lu *hpb; + struct scsi_device *sdev; + bool init_success; + + if (tot_active_srgn_pages == 0) { + ufshpb_remove(hba); + return; + } + + init_success = !ufshpb_check_hpb_reset_query(hba); + + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + if (pool_size > tot_active_srgn_pages) { + mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages); + mempool_resize(ufshpb_page_pool, tot_active_srgn_pages); + } + + shost_for_each_device(sdev, hba->host) { + hpb = ufshpb_get_hpb_data(sdev); + if (!hpb) + continue; + + if (init_success) { + ufshpb_set_state(hpb, HPB_PRESENT); + if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0) + queue_work(ufshpb_wq, &hpb->map_work); + if (!hpb->is_hcm) + ufshpb_issue_umap_all_req(hpb); + } else { + dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun); + ufshpb_destroy_lu(hba, sdev); + } + } + + if (!init_success) + ufshpb_remove(hba); +} + +void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) +{ + struct ufshpb_lu *hpb; + int ret; + struct ufshpb_lu_info hpb_lu_info = { 0 }; + int lun = sdev->lun; + + if (lun >= hba->dev_info.max_lu_supported) + goto out; + + ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info); + if (ret) + goto out; + + hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev, + &hpb_lu_info); + if (!hpb) + goto out; + + tot_active_srgn_pages += hpb_lu_info.max_active_rgns * + hpb->srgns_per_rgn * hpb->pages_per_srgn; + +out: + /* All LUs are initialized */ + if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt)) + ufshpb_hpb_lu_prepared(hba); +} + +static int ufshpb_init_mem_wq(struct ufs_hba *hba) +{ + int ret; + unsigned int pool_size; + + ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache", + sizeof(struct ufshpb_map_ctx), + 0, 0, NULL); + if (!ufshpb_mctx_cache) { + dev_err(hba->dev, "ufshpb: cannot init mctx cache\n"); + return -ENOMEM; + } + + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n", + __func__, __LINE__, ufshpb_host_map_kbytes, pool_size); + + ufshpb_mctx_pool = mempool_create_slab_pool(pool_size, + ufshpb_mctx_cache); + if (!ufshpb_mctx_pool) { + dev_err(hba->dev, "ufshpb: cannot init mctx pool\n"); + ret = -ENOMEM; + goto release_mctx_cache; + } + + 
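/*
 * Note (illustrative, not part of the original patch): the driver keeps two
 * global mempools for the L2P cache. ufshpb_mctx_pool, created just above,
 * holds struct ufshpb_map_ctx descriptors; ufshpb_page_pool, created below,
 * holds the pages that each descriptor's m_page[] array points at. Both are
 * sized from the ufshpb_host_map_kbytes module parameter as
 * pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE, and
 * once every LU has finished initialization ufshpb_hpb_lu_prepared()
 * resizes both pools down to tot_active_srgn_pages when that is smaller.
 */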
ufshpb_page_pool = mempool_create_page_pool(pool_size, 0); + if (!ufshpb_page_pool) { + dev_err(hba->dev, "ufshpb: cannot init page pool\n"); + ret = -ENOMEM; + goto release_mctx_pool; + } + + ufshpb_wq = alloc_workqueue("ufshpb-wq", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0); + if (!ufshpb_wq) { + dev_err(hba->dev, "ufshpb: alloc workqueue failed\n"); + ret = -ENOMEM; + goto release_page_pool; + } + + return 0; + +release_page_pool: + mempool_destroy(ufshpb_page_pool); +release_mctx_pool: + mempool_destroy(ufshpb_mctx_pool); +release_mctx_cache: + kmem_cache_destroy(ufshpb_mctx_cache); + return ret; +} + +void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) +{ + struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev; + int max_active_rgns = 0; + int hpb_num_lu; + + hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU]; + if (hpb_num_lu == 0) { + dev_err(hba->dev, "No HPB LU supported\n"); + hpb_info->hpb_disabled = true; + return; + } + + hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE]; + hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE]; + max_active_rgns = get_unaligned_be16(geo_buf + + GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS); + + if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 || + max_active_rgns == 0) { + dev_err(hba->dev, "No HPB supported device\n"); + hpb_info->hpb_disabled = true; + return; + } +} + +void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) +{ + struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; + int version, ret; + u32 max_hpb_single_cmd = HPB_MULTI_CHUNK_LOW; + + hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL]; + + version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER); + if ((version != HPB_SUPPORT_VERSION) && + (version != HPB_SUPPORT_LEGACY_VERSION)) { + dev_err(hba->dev, "%s: HPB %x version is not supported.\n", + __func__, version); + hpb_dev_info->hpb_disabled = true; + return; + } + + if (version == HPB_SUPPORT_LEGACY_VERSION) + hpb_dev_info->is_legacy = true; + + pm_runtime_get_sync(hba->dev); + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_hpb_single_cmd); + pm_runtime_put_sync(hba->dev); + + if (ret) + dev_err(hba->dev, "%s: idn: read max size of single hpb cmd query request failed", + __func__); + hpb_dev_info->max_hpb_single_cmd = max_hpb_single_cmd; + + /* + * Get the number of user logical unit to check whether all + * scsi_device finish initialization + */ + hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU]; +} + +void ufshpb_init(struct ufs_hba *hba) +{ + struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev; + int try; + int ret; + + if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled) + return; + + if (ufshpb_init_mem_wq(hba)) { + hpb_dev_info->hpb_disabled = true; + return; + } + + atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu); + tot_active_srgn_pages = 0; + /* issue HPB reset query */ + for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) { + ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG, + QUERY_FLAG_IDN_HPB_RESET, 0, NULL); + if (!ret) + break; + } +} + +void ufshpb_remove(struct ufs_hba *hba) +{ + mempool_destroy(ufshpb_page_pool); + mempool_destroy(ufshpb_mctx_pool); + kmem_cache_destroy(ufshpb_mctx_cache); + + destroy_workqueue(ufshpb_wq); +} + +module_param(ufshpb_host_map_kbytes, uint, 0644); +MODULE_PARM_DESC(ufshpb_host_map_kbytes, + "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool"); diff --git a/drivers/scsi/ufs/ufshpb.h 
b/drivers/scsi/ufs/ufshpb.h new file mode 100644 index 000000000000..a79e07398970 --- /dev/null +++ b/drivers/scsi/ufs/ufshpb.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Universal Flash Storage Host Performance Booster + * + * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd. + * + * Authors: + * Yongmyung Lee <ymhungry.lee@samsung.com> + * Jinyoung Choi <j-young.choi@samsung.com> + */ + +#ifndef _UFSHPB_H_ +#define _UFSHPB_H_ + +/* hpb response UPIU macro */ +#define HPB_RSP_NONE 0x0 +#define HPB_RSP_REQ_REGION_UPDATE 0x1 +#define HPB_RSP_DEV_RESET 0x2 +#define MAX_ACTIVE_NUM 2 +#define MAX_INACTIVE_NUM 2 +#define DEV_DATA_SEG_LEN 0x14 +#define DEV_SENSE_SEG_LEN 0x12 +#define DEV_DES_TYPE 0x80 +#define DEV_ADDITIONAL_LEN 0x10 + +/* hpb map & entries macro */ +#define HPB_RGN_SIZE_UNIT 512 +#define HPB_ENTRY_BLOCK_SIZE 4096 +#define HPB_ENTRY_SIZE 0x8 +#define PINNED_NOT_SET U32_MAX + +/* hpb support chunk size */ +#define HPB_LEGACY_CHUNK_HIGH 1 +#define HPB_MULTI_CHUNK_LOW 7 +#define HPB_MULTI_CHUNK_HIGH 255 + +/* hpb vender defined opcode */ +#define UFSHPB_READ 0xF8 +#define UFSHPB_READ_BUFFER 0xF9 +#define UFSHPB_READ_BUFFER_ID 0x01 +#define UFSHPB_WRITE_BUFFER 0xFA +#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID 0x01 +#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02 +#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID 0x03 +#define HPB_WRITE_BUFFER_CMD_LENGTH 10 +#define MAX_HPB_READ_ID 0x7F +#define HPB_READ_BUFFER_CMD_LENGTH 10 +#define LU_ENABLED_HPB_FUNC 0x02 + +#define HPB_RESET_REQ_RETRIES 10 +#define HPB_MAP_REQ_RETRIES 5 +#define HPB_REQUEUE_TIME_MS 0 + +#define HPB_SUPPORT_VERSION 0x200 +#define HPB_SUPPORT_LEGACY_VERSION 0x100 + +enum UFSHPB_MODE { + HPB_HOST_CONTROL, + HPB_DEVICE_CONTROL, +}; + +enum UFSHPB_STATE { + HPB_INIT = 0, + HPB_PRESENT = 1, + HPB_SUSPEND, + HPB_FAILED, + HPB_RESET, +}; + +enum HPB_RGN_STATE { + HPB_RGN_INACTIVE, + HPB_RGN_ACTIVE, + /* pinned regions are always active */ + HPB_RGN_PINNED, +}; + +enum HPB_SRGN_STATE { + HPB_SRGN_UNUSED, + HPB_SRGN_INVALID, + HPB_SRGN_VALID, + HPB_SRGN_ISSUED, +}; + +/** + * struct ufshpb_lu_info - UFSHPB logical unit related info + * @num_blocks: the number of logical block + * @pinned_start: the start region number of pinned region + * @num_pinned: the number of pinned regions + * @max_active_rgns: maximum number of active regions + */ +struct ufshpb_lu_info { + int num_blocks; + int pinned_start; + int num_pinned; + int max_active_rgns; +}; + +struct ufshpb_map_ctx { + struct page **m_page; + unsigned long *ppn_dirty; +}; + +struct ufshpb_subregion { + struct ufshpb_map_ctx *mctx; + enum HPB_SRGN_STATE srgn_state; + int rgn_idx; + int srgn_idx; + bool is_last; + + /* subregion reads - for host mode */ + unsigned int reads; + + /* below information is used by rsp_list */ + struct list_head list_act_srgn; +}; + +struct ufshpb_region { + struct ufshpb_lu *hpb; + struct ufshpb_subregion *srgn_tbl; + enum HPB_RGN_STATE rgn_state; + int rgn_idx; + int srgn_cnt; + + /* below information is used by rsp_list */ + struct list_head list_inact_rgn; + + /* below information is used by lru */ + struct list_head list_lru_rgn; + unsigned long rgn_flags; +#define RGN_FLAG_DIRTY 0 +#define RGN_FLAG_UPDATE 1 + + /* region reads - for host mode */ + spinlock_t rgn_lock; + unsigned int reads; + /* region "cold" timer - for host mode */ + ktime_t read_timeout; + unsigned int read_timeout_expiries; + struct list_head list_expired_rgn; +}; + +#define for_each_sub_region(rgn, i, srgn) \ + for ((i) = 0; \ + ((i) < 
(rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \ + (i)++) + +/** + * struct ufshpb_req - HPB related request structure (write/read buffer) + * @req: block layer request structure + * @bio: bio for this request + * @hpb: ufshpb_lu structure that related to + * @list_req: ufshpb_req mempool list + * @sense: store its sense data + * @mctx: L2P map information + * @rgn_idx: target region index + * @srgn_idx: target sub-region index + * @lun: target logical unit number + * @m_page: L2P map information data for pre-request + * @len: length of host-side cached L2P map in m_page + * @lpn: start LPN of L2P map in m_page + */ +struct ufshpb_req { + struct request *req; + struct bio *bio; + struct ufshpb_lu *hpb; + struct list_head list_req; + union { + struct { + struct ufshpb_map_ctx *mctx; + unsigned int rgn_idx; + unsigned int srgn_idx; + unsigned int lun; + } rb; + struct { + struct page *m_page; + unsigned int len; + unsigned long lpn; + } wb; + }; +}; + +struct victim_select_info { + struct list_head lh_lru_rgn; /* LRU list of regions */ + int max_lru_active_cnt; /* supported hpb #region - pinned #region */ + atomic_t active_cnt; +}; + +/** + * ufshpb_params - ufs hpb parameters + * @requeue_timeout_ms - requeue threshold of wb command (0x2) + * @activation_thld - min reads [IOs] to activate/update a region + * @normalization_factor - shift right the region's reads + * @eviction_thld_enter - min reads [IOs] for the entering region in eviction + * @eviction_thld_exit - max reads [IOs] for the exiting region in eviction + * @read_timeout_ms - timeout [ms] from the last read IO to the region + * @read_timeout_expiries - amount of allowable timeout expireis + * @timeout_polling_interval_ms - frequency in which timeouts are checked + * @inflight_map_req - number of inflight map requests + */ +struct ufshpb_params { + unsigned int requeue_timeout_ms; + unsigned int activation_thld; + unsigned int normalization_factor; + unsigned int eviction_thld_enter; + unsigned int eviction_thld_exit; + unsigned int read_timeout_ms; + unsigned int read_timeout_expiries; + unsigned int timeout_polling_interval_ms; + unsigned int inflight_map_req; +}; + +struct ufshpb_stats { + u64 hit_cnt; + u64 miss_cnt; + u64 rb_noti_cnt; + u64 rb_active_cnt; + u64 rb_inactive_cnt; + u64 map_req_cnt; + u64 pre_req_cnt; + u64 umap_req_cnt; +}; + +struct ufshpb_lu { + int lun; + struct scsi_device *sdev_ufs_lu; + + spinlock_t rgn_state_lock; /* for protect rgn/srgn state */ + struct ufshpb_region *rgn_tbl; + + atomic_t hpb_state; + + spinlock_t rsp_list_lock; + struct list_head lh_act_srgn; /* hold rsp_list_lock */ + struct list_head lh_inact_rgn; /* hold rsp_list_lock */ + + /* pre request information */ + struct ufshpb_req *pre_req; + int num_inflight_pre_req; + int throttle_pre_req; + int num_inflight_map_req; /* hold param_lock */ + spinlock_t param_lock; + + struct list_head lh_pre_req_free; + int cur_read_id; + int pre_req_min_tr_len; + int pre_req_max_tr_len; + + /* cached L2P map management worker */ + struct work_struct map_work; + + /* for selecting victim */ + struct victim_select_info lru_info; + struct work_struct ufshpb_normalization_work; + struct delayed_work ufshpb_read_to_work; + unsigned long work_data_bits; +#define TIMEOUT_WORK_RUNNING 0 + + /* pinned region information */ + u32 lu_pinned_start; + u32 lu_pinned_end; + + /* HPB related configuration */ + u32 rgns_per_lu; + u32 srgns_per_lu; + u32 last_srgn_entries; + int srgns_per_rgn; + u32 srgn_mem_size; + u32 entries_per_rgn_mask; + u32 
entries_per_rgn_shift; + u32 entries_per_srgn; + u32 entries_per_srgn_mask; + u32 entries_per_srgn_shift; + u32 pages_per_srgn; + + bool is_hcm; + + struct ufshpb_stats stats; + struct ufshpb_params params; + + struct kmem_cache *map_req_cache; + struct kmem_cache *m_page_cache; + + struct list_head list_hpb_lu; +}; + +struct ufs_hba; +struct ufshcd_lrb; + +#ifndef CONFIG_SCSI_UFS_HPB +static int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; } +static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {} +static void ufshpb_resume(struct ufs_hba *hba) {} +static void ufshpb_suspend(struct ufs_hba *hba) {} +static void ufshpb_reset(struct ufs_hba *hba) {} +static void ufshpb_reset_host(struct ufs_hba *hba) {} +static void ufshpb_init(struct ufs_hba *hba) {} +static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {} +static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {} +static void ufshpb_remove(struct ufs_hba *hba) {} +static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; } +static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {} +static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {} +static bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; } +#else +int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp); +void ufshpb_resume(struct ufs_hba *hba); +void ufshpb_suspend(struct ufs_hba *hba); +void ufshpb_reset(struct ufs_hba *hba); +void ufshpb_reset_host(struct ufs_hba *hba); +void ufshpb_init(struct ufs_hba *hba); +void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev); +void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev); +void ufshpb_remove(struct ufs_hba *hba); +bool ufshpb_is_allowed(struct ufs_hba *hba); +void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf); +void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf); +bool ufshpb_is_legacy(struct ufs_hba *hba); +extern struct attribute_group ufs_sysfs_hpb_stat_group; +extern struct attribute_group ufs_sysfs_hpb_param_group; +#endif + +#endif /* End of Header */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index b0deaf4af5a3..c25ce8f0e0af 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -519,7 +519,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, struct virtio_scsi_cmd_req_pi *cmd_pi, struct scsi_cmnd *sc) { - struct request *rq = sc->request; + struct request *rq = scsi_cmd_to_rq(sc); struct blk_integrity *bi; virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc); @@ -543,7 +543,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, struct scsi_cmnd *sc) { - u32 tag = blk_mq_unique_tag(sc->request); + u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(sc)); u16 hwq = blk_mq_unique_tag_to_hwq(tag); return &vscsi->req_vqs[hwq]; diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c index ec9d399fbbd8..0204e314b482 100644 --- a/drivers/scsi/xen-scsifront.c +++ b/drivers/scsi/xen-scsifront.c @@ -212,7 +212,7 @@ static int scsifront_do_request(struct vscsifrnt_info *info, memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; - ring_req->timeout_per_command = sc->request->timeout / HZ; + ring_req->timeout_per_command = scsi_cmd_to_rq(sc)->timeout / 
HZ; for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++) ring_req->seg[i] = shadow->seg[i];
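
Editorial note on the geometry fields parsed by ufshpb_get_geo_info(): the HPB_RGN_SIZE_UNIT, HPB_ENTRY_SIZE and HPB_ENTRY_BLOCK_SIZE macros in ufshpb.h suggest that bHPBRegionSize and bHPBSubRegionSize are power-of-two exponents of a 512-byte unit, with one 8-byte L2P entry per 4 KiB logical block. The standalone sketch below only illustrates that arithmetic under that assumption; the descriptor values are hypothetical and the driver's own sizing code in ufshpb.c remains authoritative.

/*
 * Illustrative only: derive HPB region/sub-region sizing from the
 * geometry descriptor exponents, assuming the 512-byte unit and
 * 4 KiB logical block implied by the macros in ufshpb.h.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rgn_size = 12;	/* hypothetical bHPBRegionSize */
	unsigned int srgn_size = 10;	/* hypothetical bHPBSubRegionSize */

	unsigned long long rgn_bytes  = 512ULL << rgn_size;
	unsigned long long srgn_bytes = 512ULL << srgn_size;

	/* one 8-byte physical-page-number entry per 4 KiB logical block */
	unsigned long long entries_per_srgn = srgn_bytes / 4096;
	unsigned long long srgn_map_bytes   = entries_per_srgn * 8;

	printf("region %llu MiB, sub-region %llu KiB, %llu entries (%llu B of map) per sub-region\n",
	       rgn_bytes >> 20, srgn_bytes >> 10,
	       entries_per_srgn, srgn_map_bytes);
	return 0;
}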
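
The ufshpb_params fields (activation_thld, normalization_factor, eviction thresholds) drive host-control-mode region activation by counting reads per region and periodically decaying those counts. A minimal sketch of that idea follows; it is not the driver's actual logic, which lives in ufshpb.c and also handles eviction, timeouts and locking.

/*
 * Sketch (not the driver's code): count reads per region, activate a
 * region once it crosses activation_thld, and periodically shift the
 * counter right by normalization_factor so only recently-hot regions
 * stay above the threshold.
 */
struct toy_region {
	unsigned int reads;
	int active;
};

static void toy_account_read(struct toy_region *rgn, unsigned int activation_thld)
{
	rgn->reads++;
	if (!rgn->active && rgn->reads >= activation_thld)
		rgn->active = 1;	/* the driver would queue a READ BUFFER map request here */
}

static void toy_normalize(struct toy_region *rgn, unsigned int normalization_factor)
{
	rgn->reads >>= normalization_factor;	/* decay toward zero */
}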
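
The #ifndef CONFIG_SCSI_UFS_HPB block at the end of ufshpb.h is the usual config-gated stub pattern: with the feature enabled, callers link against the real implementations; without it, they get empty stubs and the calls compile away. A generic miniature of the pattern, with hypothetical names (note that such stubs are more commonly written as static inline, which also avoids defined-but-not-used warnings):

/* Hypothetical header fragment showing the same compile-time gating. */
struct foo_host;

#ifdef CONFIG_FOO_FEATURE
int foo_prep(struct foo_host *host);
void foo_remove(struct foo_host *host);
#else
static inline int foo_prep(struct foo_host *host) { return 0; }
static inline void foo_remove(struct foo_host *host) {}
#endif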
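
The virtio_scsi and xen-scsifront hunks show the conversion applied throughout this series: drivers stop dereferencing scsi_cmnd's request pointer directly and use the scsi_cmd_to_rq() accessor instead, apparently in preparation for removing the back-pointer. A hedged sketch of a driver pulling the tag and timeout from the request after the conversion; the helper function and its uses are illustrative only.

#include <linux/blk-mq.h>
#include <linux/jiffies.h>
#include <scsi/scsi_cmnd.h>

/* Illustrative helper, not taken from any driver in this diff. */
static void example_fill_request(struct scsi_cmnd *sc)
{
	struct request *rq = scsi_cmd_to_rq(sc);	/* was: sc->request */
	unsigned int timeout_secs = rq->timeout / HZ;	/* jiffies -> seconds */
	u32 tag = blk_mq_unique_tag(rq);		/* encodes hwq + per-queue tag */
	u16 hwq = blk_mq_unique_tag_to_hwq(tag);

	/* a real driver would copy these into its ring/firmware request */
	(void)timeout_secs;
	(void)hwq;
}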