Diffstat (limited to 'drivers/scsi')
154 files changed, 6234 insertions, 3316 deletions
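The bulk of this series converts the NCR5380 core and its board drivers from the old double-parenthesis debug macros — dprintk(flag, ("fmt", args)), previously compiled away to do {} while (0) — to a real variadic macro gated on the NDEBUG category mask. A minimal userspace sketch of the resulting pattern, assuming only what the NCR5380.h hunk below defines (printf stands in for the kernel's pr_debug):

```c
#include <stdio.h>

/* Two of the debug-category bits this diff adds to NCR5380.h. */
#define NDEBUG_ABORT	0x800000
#define NDEBUG_TAGS	0x1000000

/* Compile-time mask of enabled categories; 0 turns every call into a no-op. */
#ifndef NDEBUG
#define NDEBUG (NDEBUG_ABORT)
#endif

/*
 * Variadic form: the format string and its arguments are ordinary macro
 * arguments, so call sites drop the extra parentheses of the old
 * dprintk(flg, (...)).  The do { } while (0) wrapper keeps the macro safe
 * in unbraced if/else bodies, and ## (a GNU extension, as in the kernel)
 * swallows the trailing comma when no arguments follow the format.
 */
#define dprintk(flg, fmt, ...) \
	do { if ((NDEBUG) & (flg)) printf(fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
	int host_no = 0;

	dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", host_no);
	dprintk(NDEBUG_TAGS, "suppressed: NDEBUG_TAGS is not in the mask\n");
	return 0;
}
```

Because the old macro discarded its arguments entirely, call sites could rot unnoticed; with the arguments now compiled and format-checked, the same diff has to fix stragglers such as %ul used for an unsigned long (correct specifier: %lu) and debug statements still reading the removed tmp->target/tmp->lun fields instead of tmp->device->id and tmp->device->lun.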
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 02832d64d918..baca5897039f 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1773,6 +1773,7 @@ config SCSI_BFA_FC config SCSI_VIRTIO tristate "virtio-scsi support" depends on VIRTIO + select BLK_DEV_INTEGRITY help This is the virtual HBA driver for virtio. If the kernel will be used in a virtual machine, say Y or M. diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index bcd223868227..93d13fc9a293 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -27,8 +27,6 @@ */ /* - * $Log: NCR5380.c,v $ - * Revision 1.10 1998/9/2 Alan Cox * (alan@lxorguk.ukuu.org.uk) * Fixed up the timer lockups reported so far. Things still suck. Looking @@ -89,13 +87,6 @@ #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_spi.h> -#ifndef NDEBUG -#define NDEBUG 0 -#endif -#ifndef NDEBUG_ABORT -#define NDEBUG_ABORT 0 -#endif - #if (NDEBUG & NDEBUG_LISTS) #define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); } #define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); } @@ -1005,7 +996,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *) LIST(cmd, tmp); tmp->host_scribble = (unsigned char *) cmd; } - dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail")); + dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); /* Run the coroutine if it isn't already running. */ /* Kick off command processing */ @@ -1040,7 +1031,7 @@ static void NCR5380_main(struct work_struct *work) /* Lock held here */ done = 1; if (!hostdata->connected && !hostdata->selecting) { - dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no)); + dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no); /* * Search through the issue_queue for a command destined * for a target that's not busy. @@ -1048,7 +1039,7 @@ static void NCR5380_main(struct work_struct *work) for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) { if (prev != tmp) - dprintk(NDEBUG_LISTS, ("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun)); + dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); /* When we find one, remove it from the issue queue. */ if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) { if (prev) { @@ -1066,7 +1057,7 @@ static void NCR5380_main(struct work_struct *work) * On failure, we must add the command back to the * issue queue so we can keep trying. 
*/ - dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun)); + dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun); /* * A successful selection is defined as one that @@ -1095,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work) tmp->host_scribble = (unsigned char *) hostdata->issue_queue; hostdata->issue_queue = tmp; done = 0; - dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no)); + dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no); } /* lock held here still */ } /* if target/lun is not busy */ @@ -1125,9 +1116,9 @@ static void NCR5380_main(struct work_struct *work) #endif && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies)) ) { - dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no)); + dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no); NCR5380_information_transfer(instance); - dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no)); + dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no); done = 0; } else break; @@ -1159,8 +1150,8 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) unsigned char basr; unsigned long flags; - dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n", - instance->irq)); + dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n", + instance->irq); do { done = 1; @@ -1173,14 +1164,14 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) NCR5380_dprint(NDEBUG_INTR, instance); if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { done = 0; - dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no)); + dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no); NCR5380_reselect(instance); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if (basr & BASR_PARITY_ERROR) { - dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no)); + dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no)); + dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else { #if defined(REAL_DMA) @@ -1210,7 +1201,7 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); } #else - dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG))); + dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); #endif } @@ -1304,7 +1295,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) hostdata->restart_select = 0; NCR5380_dprint(NDEBUG_ARBITRATION, instance); - dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, 
instance->this_id); /* * Set the phase bits to 0, otherwise the NCR5380 won't drive the @@ -1333,7 +1324,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) goto failed; } - dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no); /* * The arbitration delay is 2.2us, but this is a minimum and there is @@ -1347,7 +1338,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) /* Check for lost arbitration */ if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { NCR5380_write(MODE_REG, MR_BASE); - dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no); goto failed; } NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL); @@ -1360,7 +1351,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no); goto failed; } /* @@ -1370,7 +1361,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) udelay(2); - dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no)); + dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no); /* * Now that we have won arbitration, start Selection process, asserting @@ -1422,7 +1413,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag) udelay(1); - dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd))); + dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)); /* * The SCSI specification calls for a 250 ms timeout for the actual @@ -1487,7 +1478,7 @@ part2: collect_stats(hostdata, cmd); cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no)); + dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return 0; } @@ -1520,7 +1511,7 @@ part2: goto failed; } - dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id)); + dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id); tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 
0 : 1), cmd->device->lun); len = 1; @@ -1530,7 +1521,7 @@ part2: data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no)); + dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no); /* XXX need to handle errors here */ hostdata->connected = cmd; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); @@ -1583,9 +1574,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase NCR5380_setup(instance); if (!(p & SR_IO)) - dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c)); + dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c); else - dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c)); + dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c); /* * The NCR5380 chip will only drive the SCSI bus when the @@ -1620,11 +1611,11 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase break; } - dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no)); + dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no); /* Check for phase mismatch */ if ((tmp & PHASE_MASK) != p) { - dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no)); + dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no); NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance); break; } @@ -1660,7 +1651,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase /* FIXME - if this fails bus reset ?? */ NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ); - dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no)); + dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no); /* * We have several special cases to consider during REQ/ACK handshaking : @@ -1681,7 +1672,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase } } while (--c); - dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c)); + dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c); *count = c; *data = d; @@ -1828,7 +1819,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase c -= 2; } #endif - dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d)); + dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d); hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c); #endif @@ -1857,7 +1848,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE); #endif /* def REAL_DMA */ - dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG))); + dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)); /* * On the PAS16 at least I/O recovery delays are not needed here. 
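Around these selection hunks the driver tracks outstanding nexuses with one busy word per target and one bit per LUN (hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun) above). A self-contained sketch of that bookkeeping — the array width and types are illustrative, not the driver's exact declarations:

```c
#include <assert.h>
#include <stdio.h>

#define MAX_TARGETS 8	/* illustrative; narrow SCSI uses IDs 0-7 */

/* One word per target, one bit per LUN, as in NCR5380_hostdata's busy[]. */
static unsigned char busy[MAX_TARGETS];

static int lun_busy(int target, int lun)
{
	return busy[target] & (1 << lun);
}

static void claim(int target, int lun)
{
	busy[target] |= 1 << lun;	/* nexus established */
}

static void release(int target, int lun)
{
	busy[target] &= ~(1 << lun);	/* command completed */
}

int main(void)
{
	claim(3, 0);
	assert(lun_busy(3, 0));
	assert(!lun_busy(3, 1));	/* other LUNs on target 3 stay free */
	release(3, 0);
	assert(!lun_busy(3, 0));
	printf("busy bookkeeping ok\n");
	return 0;
}
```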
@@ -1934,7 +1925,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase } } - dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG))); + dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)); NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); @@ -1948,7 +1939,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase #ifdef READ_OVERRUNS if (*phase == p && (p & SR_IO) && residue == 0) { if (overrun) { - dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n")); + dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); **data = saved_data; *data += 1; *count -= 1; @@ -1957,13 +1948,13 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase printk("No overrun??\n"); cnt = toPIO = 2; } - dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data)); + dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data); NCR5380_transfer_pio(instance, phase, &cnt, data); *count -= toPIO - cnt; } #endif - dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count))); + dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)); return 0; #elif defined(REAL_DMA) @@ -2013,7 +2004,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase foo = NCR5380_pwrite(instance, d, c); #else int timeout; - dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c)); + dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c); if (!(foo = NCR5380_pwrite(instance, d, c))) { /* * Wait for the last byte to be sent. 
If REQ is being asserted for @@ -2024,19 +2015,19 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH)); if (!timeout) - dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no)); + dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no); if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) { hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT; if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) { hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT; - dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no)); + dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no); } } } else { - dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n")); + dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n"); while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)); - dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n")); + dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n"); } } #endif @@ -2045,9 +2036,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) { - dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n")); + dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n"); if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) { - dprintk(NDEBUG_C400_PWRITE, ("53C400w: got it, reading reset interrupt reg\n")); + dprintk(NDEBUG_C400_PWRITE, "53C400w: got it, reading reset interrupt reg\n"); NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else { printk("53C400w: IRQ NOT THERE!\n"); @@ -2139,7 +2130,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); - dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual)); + dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } /* * The preferred transfer method is going to be @@ -2219,7 +2210,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { case LINKED_FLG_CMD_COMPLETE: /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun)); + dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun); /* * Sanity check : A linked command should only terminate with * one of these messages if there are more linked commands @@ -2235,7 +2226,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { /* The next command is still part of this process */ cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun)); + dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun); collect_stats(hostdata, cmd); cmd->scsi_done(cmd); cmd = hostdata->connected; @@ 
-2247,7 +2238,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { sink = 1; NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = NULL; - dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun)); + dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun); hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); /* @@ -2281,13 +2272,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no)); + dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no); LIST(cmd, hostdata->issue_queue); cmd->host_scribble = (unsigned char *) hostdata->issue_queue; hostdata->issue_queue = (Scsi_Cmnd *) cmd; - dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no)); + dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no); } else #endif /* def AUTOSENSE */ { @@ -2327,7 +2318,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { hostdata->disconnected_queue; hostdata->connected = NULL; hostdata->disconnected_queue = cmd; - dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun)); + dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun); /* * Restore phase bits to 0 so an interrupted selection, * arbitration can resume. 
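The queue shuffling in these hunks (issue_queue, disconnected_queue) relies on threading Scsi_Cmnd structures into singly linked lists through the generic host_scribble pointer, with casts at every access. A stripped-down sketch of the head-insert and unlink patterns visible above, using a typed next pointer in place of host_scribble:

```c
#include <stdio.h>

struct cmd {
	int tag;
	/* In the driver this is Scsi_Cmnd's unsigned char *host_scribble,
	 * borrowed as a next pointer; a typed pointer stands in here. */
	struct cmd *next;
};

/* Push at the head, as the diff does for REQUEST_SENSE commands. */
static void push_head(struct cmd **queue, struct cmd *c)
{
	c->next = *queue;
	*queue = c;
}

/* Unlink an arbitrary entry, as NCR5380_abort() walks issue_queue
 * with a pointer-to-pointer 'prev'. */
static void unlink_cmd(struct cmd **queue, struct cmd *c)
{
	struct cmd **prev;

	for (prev = queue; *prev; prev = &(*prev)->next) {
		if (*prev == c) {
			*prev = c->next;
			c->next = NULL;
			return;
		}
	}
}

int main(void)
{
	struct cmd a = { .tag = 1 }, b = { .tag = 2 };
	struct cmd *issue_queue = NULL;

	push_head(&issue_queue, &a);
	push_head(&issue_queue, &b);	/* queue: b -> a */
	unlink_cmd(&issue_queue, &b);
	printf("head tag now %d\n", issue_queue->tag);	/* prints 1 */
	return 0;
}
```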
@@ -2373,14 +2364,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { extended_msg[0] = EXTENDED_MESSAGE; /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no)); + dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2])); + dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]); if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) { /* Accept third byte by clearing ACK */ @@ -2390,7 +2381,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len)); + dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len); switch (extended_msg[2]) { case EXTENDED_SDTR: @@ -2456,7 +2447,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { NCR5380_transfer_pio(instance, &phase, &len, &data); if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) { NCR5380_set_timer(hostdata, USLEEP_SLEEP); - dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires)); + dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires); return; } break; @@ -2468,7 +2459,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { break; default: printk("scsi%d : unknown phase\n", instance->host_no); - NCR5380_dprint(NDEBUG_ALL, instance); + NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } /* if (tmp * SR_REQ) */ else { @@ -2476,7 +2467,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) { */ if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) { NCR5380_set_timer(hostdata, USLEEP_SLEEP); - dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires)); + dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires); return; } } @@ -2517,7 +2508,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { hostdata->restart_select = 1; target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no)); + dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no); /* * At this point, we have detected that our SCSI ID is on the bus, @@ -2597,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) { do_abort(instance); } else { hostdata->connected = tmp; - dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag)); + dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag); } } @@ -2682,8 +2673,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { NCR5380_setup(instance); - dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", 
instance->host_no)); - dprintk(NDEBUG_ABORT, (" basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG))); + dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no); + dprintk(NDEBUG_ABORT, " basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); #if 0 /* @@ -2693,7 +2684,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { */ if (hostdata->connected == cmd) { - dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no); hostdata->aborted = 1; /* * We should perform BSY checking, and make sure we haven't slipped @@ -2721,14 +2712,14 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { * from the issue queue. */ - dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no); for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble) if (cmd == tmp) { REMOVE(5, *prev, tmp, tmp->host_scribble); (*prev) = (Scsi_Cmnd *) tmp->host_scribble; tmp->host_scribble = NULL; tmp->result = DID_ABORT << 16; - dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no); tmp->scsi_done(tmp); return SUCCESS; } @@ -2750,7 +2741,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { */ if (hostdata->connected) { - dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no); return FAILED; } /* @@ -2780,11 +2771,11 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) { for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble) if (cmd == tmp) { - dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no); if (NCR5380_select(instance, cmd, (int) cmd->tag)) return FAILED; - dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no)); + dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no); do_abort(instance); diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 14964d0a0e9d..c79ddfa6f53c 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -21,10 +21,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: NCR5380.h,v $ - */ - #ifndef NCR5380_H #define NCR5380_H @@ -60,6 +56,9 @@ #define NDEBUG_C400_PREAD 0x100000 #define NDEBUG_C400_PWRITE 0x200000 #define NDEBUG_LISTS 0x400000 +#define NDEBUG_ABORT 0x800000 +#define NDEBUG_TAGS 0x1000000 +#define NDEBUG_MERGING 0x2000000 #define NDEBUG_ANY 0xFFFFFFFFUL @@ -292,9 +291,24 @@ struct NCR5380_hostdata { #ifdef __KERNEL__ -#define dprintk(a,b) do {} while(0) -#define NCR5380_dprint(a,b) do {} while(0) -#define NCR5380_dprint_phase(a,b) do {} while(0) +#ifndef NDEBUG +#define NDEBUG (0) +#endif + +#define dprintk(flg, fmt, ...) 
\ + do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0) + +#if NDEBUG +#define NCR5380_dprint(flg, arg) \ + do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0) +#define NCR5380_dprint_phase(flg, arg) \ + do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0) +static void NCR5380_print_phase(struct Scsi_Host *instance); +static void NCR5380_print(struct Scsi_Host *instance); +#else +#define NCR5380_dprint(flg, arg) do {} while (0) +#define NCR5380_dprint_phase(flg, arg) do {} while (0) +#endif #if defined(AUTOPROBE_IRQ) static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); @@ -307,10 +321,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id); #endif static void NCR5380_main(struct work_struct *work); static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance); -#ifdef NDEBUG -static void NCR5380_print_phase(struct Scsi_Host *instance); -static void NCR5380_print(struct Scsi_Host *instance); -#endif static int NCR5380_abort(Scsi_Cmnd * cmd); static int NCR5380_bus_reset(Scsi_Cmnd * cmd); static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index 9b059422aacb..113874c1284b 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h @@ -911,7 +911,7 @@ struct vpd_config { uint8_t length; uint8_t revision; uint8_t device_flags; - uint8_t termnation_menus[2]; + uint8_t termination_menus[2]; uint8_t fifo_threshold; uint8_t end_tag; uint8_t vpd_checksum; diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c index 14b5f8d0e7f4..cc9bd26f5d1a 100644 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c @@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd) for (bit = 0; bit < 8; bit++) { if ((pci_status[i] & (0x1 << bit)) != 0) { - static const char *s; + const char *s; s = pci_status_strings[bit]; if (i == 7/*TARG*/ && bit == 3) @@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat) for (bit = 0; bit < 8; bit++) { - if ((split_status[i] & (0x1 << bit)) != 0) { - static const char *s; - - s = split_status_strings[bit]; - printk(s, ahd_name(ahd), + if ((split_status[i] & (0x1 << bit)) != 0) + printk(split_status_strings[bit], ahd_name(ahd), split_status_source[i]); - } if (i > 1) continue; - if ((sg_split_status[i] & (0x1 << bit)) != 0) { - static const char *s; - - s = split_status_strings[bit]; - printk(s, ahd_name(ahd), "SG"); - } + if ((sg_split_status[i] & (0x1 << bit)) != 0) + printk(split_status_strings[bit], ahd_name(ahd), "SG"); } } /* diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index c0c62583b542..114ff0c6e311 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -145,16 +145,6 @@ static struct scsi_transport_template *ahc_linux_transport_template = NULL; #endif /* - * Control collection of SCSI transfer statistics for the /proc filesystem. - * - * NOTE: Do NOT enable this when running on kernels version 1.2.x and below. - * NOTE: This does affect performance since it has to maintain statistics. 
- */ -#ifdef CONFIG_AIC7XXX_PROC_STATS -#define AIC7XXX_PROC_STATS -#endif - -/* * To change the default number of tagged transactions allowed per-device, * add a line to the lilo.conf file like: * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}" diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c index 059ff477a398..2e797a367608 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -62,13 +62,6 @@ */ #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE /* - * SCSI-II Linked command support. - * - * The higher level code doesn't support linked commands yet, and so the option - * is undef'd here. - */ -#undef CONFIG_SCSI_ACORNSCSI_LINK -/* * SCSI-II Synchronous transfer support. * * Tried and tested... @@ -160,10 +153,6 @@ #error "Yippee! ABORT TAG is now defined! Remove this error!" #endif -#ifdef CONFIG_SCSI_ACORNSCSI_LINK -#error SCSI2 LINKed commands not supported (yet)! -#endif - #ifdef USE_DMAC /* * DMAC setup parameters @@ -1668,42 +1657,6 @@ void acornscsi_message(AS_Host *host) } break; -#ifdef CONFIG_SCSI_ACORNSCSI_LINK - case LINKED_CMD_COMPLETE: - case LINKED_FLG_CMD_COMPLETE: - /* - * We don't support linked commands yet - */ - if (0) { -#if (DEBUG & DEBUG_LINK) - printk("scsi%d.%c: lun %d tag %d linked command complete\n", - host->host->host_no, acornscsi_target(host), host->SCpnt->tag); -#endif - /* - * A linked command should only terminate with one of these messages - * if there are more linked commands available. - */ - if (!host->SCpnt->next_link) { - printk(KERN_WARNING "scsi%d.%c: lun %d tag %d linked command complete, but no next_link\n", - instance->host_no, acornscsi_target(host), host->SCpnt->tag); - acornscsi_sbic_issuecmd(host, CMND_ASSERTATN); - msgqueue_addmsg(&host->scsi.msgs, 1, ABORT); - } else { - struct scsi_cmnd *SCpnt = host->SCpnt; - - acornscsi_dma_cleanup(host); - - host->SCpnt = host->SCpnt->next_link; - host->SCpnt->tag = SCpnt->tag; - SCpnt->result = DID_OK | host->scsi.SCp.Message << 8 | host->Scsi.SCp.Status; - SCpnt->done(SCpnt); - - /* initialise host->SCpnt->SCp */ - } - break; - } -#endif - default: /* reject message */ printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n", host->host->host_no, acornscsi_target(host), @@ -2825,9 +2778,6 @@ char *acornscsi_info(struct Scsi_Host *host) #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE " TAG" #endif -#ifdef CONFIG_SCSI_ACORNSCSI_LINK - " LINK" -#endif #if (DEBUG & DEBUG_NO_WRITE) " NOWRITE (" __stringify(NO_WRITE) ")" #endif @@ -2851,9 +2801,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance) #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE " TAG" #endif -#ifdef CONFIG_SCSI_ACORNSCSI_LINK - " LINK" -#endif #if (DEBUG & DEBUG_NO_WRITE) " NOWRITE (" __stringify(NO_WRITE) ")" #endif diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c index f8e060900052..8ef810a4476e 100644 --- a/drivers/scsi/arm/cumana_1.c +++ b/drivers/scsi/arm/cumana_1.c @@ -36,9 +36,6 @@ void __iomem *base; \ void __iomem *dma -#define BOARD_NORMAL 0 -#define BOARD_NCR53C400 1 - #include "../NCR5380.h" void cumanascsi_setup(char *str, int *ints) diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c index 4266eef8aca1..188e734c7ff0 100644 --- a/drivers/scsi/arm/oak.c +++ b/drivers/scsi/arm/oak.c @@ -37,9 +37,6 @@ #define NCR5380_implementation_fields \ void __iomem *base -#define BOARD_NORMAL 0 -#define BOARD_NCR53C400 1 - #include "../NCR5380.h" #undef START_DMA_INITIATOR_RECEIVE_REG diff --git 
a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 0f3cdbc80ba6..1814aa20b724 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -370,7 +370,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged) return 0; if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= TagAlloc[cmd->device->id][cmd->device->lun].queue_size) { - TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n", + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", H_NO(cmd), cmd->device->id, cmd->device->lun); return 1; } @@ -394,7 +394,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) !setup_use_tagged_queuing || !cmd->device->tagged_supported) { cmd->tag = TAG_NONE; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); - TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun); } else { TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; @@ -402,7 +402,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged) cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS); set_bit(cmd->tag, ta->allocated); ta->nr_allocated++; - TAG_PRINTK("scsi%d: using tag %d for target %d lun %d " + dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " "(now %d tags in use)\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, ta->nr_allocated); @@ -420,7 +420,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd) if (cmd->tag == TAG_NONE) { hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); - TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n", + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", H_NO(cmd), cmd->device->id, cmd->device->lun); } else if (cmd->tag >= MAX_TAGS) { printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", @@ -429,7 +429,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd) TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; clear_bit(cmd->tag, ta->allocated); ta->nr_allocated--; - TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n", + dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun); } } @@ -478,7 +478,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1; cmd->SCp.buffers_residual && virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { - MER_PRINTK("VTOP(%p) == %08lx -> merging\n", + dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n", page_address(sg_page(&cmd->SCp.buffer[1])), endaddr); #if (NDEBUG & NDEBUG_MERGING) ++cnt; @@ -490,7 +490,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd) } #if (NDEBUG & NDEBUG_MERGING) if (oldlen != cmd->SCp.this_residual) - MER_PRINTK("merged %d buffers from %p, new length %08x\n", + dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n", cnt, cmd->SCp.ptr, cmd->SCp.this_residual); #endif } @@ -626,16 +626,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) } } -#else /* !NDEBUG */ - -/* dummies... 
*/ -static inline void NCR5380_print(struct Scsi_Host *instance) -{ -}; -static inline void NCR5380_print_phase(struct Scsi_Host *instance) -{ -}; - #endif /* @@ -676,7 +666,7 @@ static inline void NCR5380_all_init(void) { static int done = 0; if (!done) { - INI_PRINTK("scsi : NCR5380_all_init()\n"); + dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); done = 1; } } @@ -739,8 +729,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance) Scsi_Cmnd *ptr; unsigned long flags; - NCR_PRINT(NDEBUG_ANY); - NCR_PRINT_PHASE(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); hostdata = (struct NCR5380_hostdata *)instance->hostdata; @@ -984,7 +974,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) } local_irq_restore(flags); - QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), + dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); /* If queue_command() is called from an interrupt (real one or bottom @@ -1054,7 +1044,7 @@ static void NCR5380_main(struct work_struct *work) done = 1; if (!hostdata->connected) { - MAIN_PRINTK("scsi%d: not connected\n", HOSTNO); + dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO); /* * Search through the issue_queue for a command destined * for a target that's not busy. @@ -1107,7 +1097,7 @@ static void NCR5380_main(struct work_struct *work) * On failure, we must add the command back to the * issue queue so we can keep trying. */ - MAIN_PRINTK("scsi%d: main(): command for target %d " + dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " "lun %d removed from issue_queue\n", HOSTNO, tmp->device->id, tmp->device->lun); /* @@ -1140,7 +1130,7 @@ static void NCR5380_main(struct work_struct *work) #endif falcon_dont_release--; local_irq_restore(flags); - MAIN_PRINTK("scsi%d: main(): select() failed, " + dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " "returned to issue_queue\n", HOSTNO); if (hostdata->connected) break; @@ -1155,10 +1145,10 @@ static void NCR5380_main(struct work_struct *work) #endif ) { local_irq_restore(flags); - MAIN_PRINTK("scsi%d: main: performing information transfer\n", + dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", HOSTNO); NCR5380_information_transfer(instance); - MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); + dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); done = 0; } } while (!done); @@ -1204,12 +1194,12 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) (BASR_PHASE_MATCH|BASR_ACK)) { saved_data = NCR5380_read(INPUT_DATA_REG); overrun = 1; - DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO); + dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO); } } } - DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", + dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); @@ -1229,13 +1219,13 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance) if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { cnt = toPIO = atari_read_overruns; if (overrun) { - DMA_PRINTK("Got an input overrun, using saved byte\n"); + dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n"); *(*data)++ = saved_data; (*count)--; cnt--; toPIO--; } - DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); + dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, 
(long)*data); NCR5380_transfer_pio(instance, &p, &cnt, data); *count -= toPIO - cnt; } @@ -1261,25 +1251,25 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) int done = 1, handled = 0; unsigned char basr; - INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); /* Look for pending interrupts */ basr = NCR5380_read(BUS_AND_STATUS_REG); - INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); + dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); /* dispatch to appropriate routine if found and done=0 */ if (basr & BASR_IRQ) { - NCR_PRINT(NDEBUG_INTR); + NCR5380_dprint(NDEBUG_INTR, instance); if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { done = 0; ENABLE_IRQ(); - INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); NCR5380_reselect(instance); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if (basr & BASR_PARITY_ERROR) { - INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else { /* @@ -1298,7 +1288,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) ((basr & BASR_END_DMA_TRANSFER) || !(basr & BASR_PHASE_MATCH))) { - INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); NCR5380_dma_complete( instance ); done = 0; ENABLE_IRQ(); @@ -1323,7 +1313,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) } if (!done) { - INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); /* Put a call to NCR5380_main() on the queue... 
*/ queue_main(); } @@ -1396,8 +1386,8 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) unsigned long flags; hostdata->restart_select = 0; - NCR_PRINT(NDEBUG_ARBITRATION); - ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, + NCR5380_dprint(NDEBUG_ARBITRATION, instance); + dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, instance->this_id); /* @@ -1442,7 +1432,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) ; #endif - ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); + dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); if (hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); @@ -1463,7 +1453,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); - ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", + dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", HOSTNO); return -1; } @@ -1478,7 +1468,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", + dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", HOSTNO); return -1; } @@ -1501,7 +1491,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) return -1; } - ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); + dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); /* * Now that we have won arbitration, start Selection process, asserting @@ -1561,7 +1551,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) udelay(1); - SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); + dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); /* * The SCSI specification calls for a 250 ms timeout for the actual @@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); if (hostdata->restart_select) printk(KERN_NOTICE "\trestart select\n"); - NCR_PRINT(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } @@ -1630,7 +1620,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) #endif cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); + dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return 0; } @@ -1656,7 +1646,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) while (!(NCR5380_read(STATUS_REG) & SR_REQ)) ; - SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", + dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", HOSTNO, cmd->device->id); tmp[0] = IDENTIFY(1, cmd->device->lun); @@ -1676,7 +1666,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); - SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); + 
dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); /* XXX need to handle errors here */ hostdata->connected = cmd; #ifndef SUPPORT_TAGS @@ -1737,12 +1727,12 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) ; - HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); + dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); /* Check for phase mismatch */ if ((tmp & PHASE_MASK) != p) { - PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); - NCR_PRINT_PHASE(NDEBUG_PIO); + dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); + NCR5380_dprint_phase(NDEBUG_PIO, instance); break; } @@ -1764,25 +1754,25 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, if (!(p & SR_IO)) { if (!((p & SR_MSG) && c > 1)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK); } else { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN); - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); } } else { - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } while (NCR5380_read(STATUS_REG) & SR_REQ) ; - HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); + dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); /* * We have several special cases to consider during REQ/ACK handshaking : @@ -1803,7 +1793,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, } } while (--c); - PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); + dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); *count = c; *data = d; @@ -1917,7 +1907,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, if (atari_read_overruns && (p & SR_IO)) c -= atari_read_overruns; - DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", + dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", HOSTNO, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", d); @@ -1997,7 +1987,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) phase = (tmp & PHASE_MASK); if (phase != old_phase) { old_phase = phase; - NCR_PRINT_PHASE(NDEBUG_INFORMATION); + NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } if (sink && (phase != PHASE_MSGOUT)) { @@ -2039,7 +2029,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) * they are at contiguous physical addresses. */ merge_contiguous_buffers(cmd); - INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", + dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", HOSTNO, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } @@ -2123,7 +2113,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - LNK_PRINTK("scsi%d: target %d lun %d linked command " + dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command " "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); /* Enable reselect interrupts */ @@ -2148,7 +2138,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) * and don't free it! 
*/ cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - LNK_PRINTK("scsi%d: target %d lun %d linked request " + dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request " "done, calling scsi_done().\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef NCR5380_STATS @@ -2165,7 +2155,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* ++guenther: possible race with Falcon locking */ falcon_dont_release++; hostdata->connected = NULL; - QU_PRINTK("scsi%d: command for target %d, lun %d " + dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d " "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef SUPPORT_TAGS cmd_free_tag(cmd); @@ -2179,7 +2169,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* ++Andreas: the mid level code knows about QUEUE_FULL now. */ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; - TAG_PRINTK("scsi%d: target %d lun %d returned " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned " "QUEUE_FULL after %d commands\n", HOSTNO, cmd->device->id, cmd->device->lun, ta->nr_allocated); @@ -2224,14 +2214,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO); + dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); local_irq_save(flags); LIST(cmd,hostdata->issue_queue); SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = (Scsi_Cmnd *) cmd; local_irq_restore(flags); - QU_PRINTK("scsi%d: REQUEST SENSE added to head of " + dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " "issue queue\n", H_NO(cmd)); } else #endif /* def AUTOSENSE */ @@ -2277,7 +2267,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) cmd->device->tagged_supported = 0; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); cmd->tag = TAG_NONE; - TAG_PRINTK("scsi%d: target %d lun %d rejected " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected " "QUEUE_TAG message; tagged queuing " "disabled\n", HOSTNO, cmd->device->id, cmd->device->lun); @@ -2294,7 +2284,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) hostdata->connected = NULL; hostdata->disconnected_queue = cmd; local_irq_restore(flags); - QU_PRINTK("scsi%d: command for target %d lun %d was " + dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was " "moved from connected to the " "disconnected_queue\n", HOSTNO, cmd->device->id, cmd->device->lun); @@ -2344,13 +2334,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); + dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, + dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, (int)extended_msg[1], (int)extended_msg[2]); if (!len && extended_msg[1] <= @@ -2362,7 +2352,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - EXT_PRINTK("scsi%d: message received, residual %d\n", + dprintk(NDEBUG_EXTENDED, "scsi%d: message received, 
residual %d\n", HOSTNO, len); switch (extended_msg[2]) { @@ -2451,7 +2441,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) break; default: printk("scsi%d: unknown phase\n", HOSTNO); - NCR_PRINT(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } /* if (tmp * SR_REQ) */ } /* while (1) */ @@ -2493,7 +2483,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - RSL_PRINTK("scsi%d: reselect\n", HOSTNO); + dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); /* * At this point, we have detected that our SCSI ID is on the bus, @@ -2544,7 +2534,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; - TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " + dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " "reselection\n", HOSTNO, target_mask, lun, tag); } #endif @@ -2598,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = tmp; - RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", + dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); falcon_dont_release--; } @@ -2640,7 +2630,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n", HOSTNO); - ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, + dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); @@ -2653,7 +2643,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) if (hostdata->connected == cmd) { - ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO); /* * We should perform BSY checking, and make sure we haven't slipped * into BUS FREE. @@ -2683,11 +2673,11 @@ int NCR5380_abort(Scsi_Cmnd *cmd) local_irq_restore(flags); cmd->scsi_done(cmd); falcon_release_lock_if_possible(hostdata); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } else { /* local_irq_restore(flags); */ printk("scsi%d: abort of connected command failed!\n", HOSTNO); - return SCSI_ABORT_ERROR; + return FAILED; } } #endif @@ -2705,13 +2695,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd) SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; local_irq_restore(flags); - ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", + dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", HOSTNO); /* Tagged queuing note: no tag to free here, hasn't been assigned * yet... 
*/ tmp->scsi_done(tmp); falcon_release_lock_if_possible(hostdata); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } } @@ -2728,8 +2718,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd) if (hostdata->connected) { local_irq_restore(flags); - ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); - return SCSI_ABORT_SNOOZE; + dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); + return FAILED; } /* @@ -2761,12 +2751,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd) tmp = NEXT(tmp)) { if (cmd == tmp) { local_irq_restore(flags); - ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); if (NCR5380_select(instance, cmd, (int)cmd->tag)) - return SCSI_ABORT_BUSY; + return FAILED; - ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); do_abort(instance); @@ -2791,7 +2781,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) local_irq_restore(flags); tmp->scsi_done(tmp); falcon_release_lock_if_possible(hostdata); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } } } @@ -2816,7 +2806,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) */ falcon_release_lock_if_possible(hostdata); - return SCSI_ABORT_NOT_RUNNING; + return FAILED; } @@ -2825,7 +2815,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd) * * Purpose : reset the SCSI bus. * - * Returns : SCSI_RESET_WAKEUP + * Returns : SUCCESS or FAILURE * */ @@ -2834,7 +2824,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) SETUP_HOSTDATA(cmd->device->host); int i; unsigned long flags; -#if 1 +#if defined(RESET_RUN_DONE) Scsi_Cmnd *connected, *disconnected_queue; #endif @@ -2859,7 +2849,14 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) * through anymore ... */ (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); -#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ + /* MSch 20140115 - looking at the generic NCR5380 driver, all of this + * should go. + * Catch-22: if we don't clear all queues, the SCSI driver lock will + * not be reset by atari_scsi_reset()! + */ + +#if defined(RESET_RUN_DONE) + /* XXX Should now be done by midlevel code, but it's broken XXX */ /* XXX see below XXX */ /* MSch: old-style reset: actually abort all command processing here */ @@ -2890,7 +2887,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) */ if ((cmd = connected)) { - ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done(cmd); } @@ -2902,7 +2899,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) cmd->scsi_done(cmd); } if (i > 0) - ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); + dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i); /* The Falcon lock should be released after a reset... 
*/ @@ -2915,7 +2912,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) * the midlevel code that the reset was SUCCESSFUL, and there is no * need to 'wake up' the commands by a request_sense */ - return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; + return SUCCESS; #else /* 1 */ /* MSch: new-style reset handling: let the mid-level do what it can */ @@ -2942,11 +2939,11 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) */ if (hostdata->issue_queue) - ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); if (hostdata->connected) - ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); if (hostdata->disconnected_queue) - ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); local_irq_save(flags); hostdata->issue_queue = NULL; @@ -2963,6 +2960,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd) local_irq_restore(flags); /* we did no complete reset of all commands, so a wakeup is required */ - return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; + return SUCCESS; #endif /* 1 */ } diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 296c936cc03c..b522134528d6 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -67,12 +67,6 @@ #include <linux/module.h> -#define NDEBUG (0) - -#define NDEBUG_ABORT 0x00100000 -#define NDEBUG_TAGS 0x00200000 -#define NDEBUG_MERGING 0x00400000 - #define AUTOSENSE /* For the Atari version, use only polled IO or REAL_DMA */ #define REAL_DMA @@ -314,7 +308,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) dma_stat = tt_scsi_dma.dma_ctrl; - INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n", + dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n", atari_scsi_host->host_no, dma_stat & 0xff); /* Look if it was the DMA that has interrupted: First possibility @@ -340,7 +334,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); - DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", + dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); if ((signed int)atari_dma_residual < 0) @@ -371,7 +365,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy) * other command. These shouldn't disconnect anyway. 
*/ if (atari_dma_residual & 0x1ff) { - DMA_PRINTK("SCSI DMA: DMA bug corrected, " + dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, " "difference %ld bytes\n", 512 - (atari_dma_residual & 0x1ff)); atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; @@ -438,7 +432,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy) "ST-DMA fifo\n", transferred & 15); atari_dma_residual = HOSTDATA_DMALEN - transferred; - DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", + dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n", atari_dma_residual); } else atari_dma_residual = 0; @@ -474,11 +468,11 @@ static void atari_scsi_fetch_restbytes(void) /* there are 'nr' bytes left for the last long address before the DMA pointer */ phys_dst ^= nr; - DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", + dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", nr, phys_dst); /* The content of the DMA pointer is a physical address! */ dst = phys_to_virt(phys_dst); - DMA_PRINTK(" = virt addr %p\n", dst); + dprintk(NDEBUG_DMA, " = virt addr %p\n", dst); for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr) *dst++ = *src++; } @@ -639,7 +633,7 @@ static int __init atari_scsi_detect(struct scsi_host_template *host) "double buffer\n"); return 0; } - atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer); + atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); atari_dma_orig_addr = 0; } #endif @@ -827,7 +821,7 @@ static int atari_scsi_bus_reset(Scsi_Cmnd *cmd) } else { atari_turnon_irq(IRQ_MFP_FSCSI); } - if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) + if (rv == SUCCESS) falcon_release_lock_if_possible(hostdata); return rv; @@ -883,7 +877,7 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, { unsigned long addr = virt_to_phys(data); - DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " + dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " "dir = %d\n", instance->host_no, data, addr, count, dir); if (!IS_A_TT() && !STRAM_ADDR(addr)) { @@ -1063,7 +1057,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len, possible_len = limit; if (possible_len != wanted_len) - DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes " + dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes " "instead of %ld\n", possible_len, wanted_len); return possible_len; diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h index 11c624bb122d..3299d91d7336 100644 --- a/drivers/scsi/atari_scsi.h +++ b/drivers/scsi/atari_scsi.h @@ -54,125 +54,6 @@ #define NCR5380_dma_xfer_len(i,cmd,phase) \ atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 
0 : 1) -/* former generic SCSI error handling stuff */ - -#define SCSI_ABORT_SNOOZE 0 -#define SCSI_ABORT_SUCCESS 1 -#define SCSI_ABORT_PENDING 2 -#define SCSI_ABORT_BUSY 3 -#define SCSI_ABORT_NOT_RUNNING 4 -#define SCSI_ABORT_ERROR 5 - -#define SCSI_RESET_SNOOZE 0 -#define SCSI_RESET_PUNT 1 -#define SCSI_RESET_SUCCESS 2 -#define SCSI_RESET_PENDING 3 -#define SCSI_RESET_WAKEUP 4 -#define SCSI_RESET_NOT_RUNNING 5 -#define SCSI_RESET_ERROR 6 - -#define SCSI_RESET_SYNCHRONOUS 0x01 -#define SCSI_RESET_ASYNCHRONOUS 0x02 -#define SCSI_RESET_SUGGEST_BUS_RESET 0x04 -#define SCSI_RESET_SUGGEST_HOST_RESET 0x08 - -#define SCSI_RESET_BUS_RESET 0x100 -#define SCSI_RESET_HOST_RESET 0x200 -#define SCSI_RESET_ACTION 0xff - -/* Debugging printk definitions: - * - * ARB -> arbitration - * ASEN -> auto-sense - * DMA -> DMA - * HSH -> PIO handshake - * INF -> information transfer - * INI -> initialization - * INT -> interrupt - * LNK -> linked commands - * MAIN -> NCR5380_main() control flow - * NDAT -> no data-out phase - * NWR -> no write commands - * PIO -> PIO transfers - * PDMA -> pseudo DMA (unused on Atari) - * QU -> queues - * RSL -> reselections - * SEL -> selections - * USL -> usleep cpde (unused on Atari) - * LBS -> last byte sent (unused on Atari) - * RSS -> restarting of selections - * EXT -> extended messages - * ABRT -> aborting and resetting - * TAG -> queue tag handling - * MER -> merging of consec. buffers - * - */ - -#define dprint(flg, format...) \ -({ \ - if (NDEBUG & (flg)) \ - printk(KERN_DEBUG format); \ -}) - -#define ARB_PRINTK(format, args...) \ - dprint(NDEBUG_ARBITRATION, format , ## args) -#define ASEN_PRINTK(format, args...) \ - dprint(NDEBUG_AUTOSENSE, format , ## args) -#define DMA_PRINTK(format, args...) \ - dprint(NDEBUG_DMA, format , ## args) -#define HSH_PRINTK(format, args...) \ - dprint(NDEBUG_HANDSHAKE, format , ## args) -#define INF_PRINTK(format, args...) \ - dprint(NDEBUG_INFORMATION, format , ## args) -#define INI_PRINTK(format, args...) \ - dprint(NDEBUG_INIT, format , ## args) -#define INT_PRINTK(format, args...) \ - dprint(NDEBUG_INTR, format , ## args) -#define LNK_PRINTK(format, args...) \ - dprint(NDEBUG_LINKED, format , ## args) -#define MAIN_PRINTK(format, args...) \ - dprint(NDEBUG_MAIN, format , ## args) -#define NDAT_PRINTK(format, args...) \ - dprint(NDEBUG_NO_DATAOUT, format , ## args) -#define NWR_PRINTK(format, args...) \ - dprint(NDEBUG_NO_WRITE, format , ## args) -#define PIO_PRINTK(format, args...) \ - dprint(NDEBUG_PIO, format , ## args) -#define PDMA_PRINTK(format, args...) \ - dprint(NDEBUG_PSEUDO_DMA, format , ## args) -#define QU_PRINTK(format, args...) \ - dprint(NDEBUG_QUEUES, format , ## args) -#define RSL_PRINTK(format, args...) \ - dprint(NDEBUG_RESELECTION, format , ## args) -#define SEL_PRINTK(format, args...) \ - dprint(NDEBUG_SELECTION, format , ## args) -#define USL_PRINTK(format, args...) \ - dprint(NDEBUG_USLEEP, format , ## args) -#define LBS_PRINTK(format, args...) \ - dprint(NDEBUG_LAST_BYTE_SENT, format , ## args) -#define RSS_PRINTK(format, args...) \ - dprint(NDEBUG_RESTART_SELECT, format , ## args) -#define EXT_PRINTK(format, args...) \ - dprint(NDEBUG_EXTENDED, format , ## args) -#define ABRT_PRINTK(format, args...) \ - dprint(NDEBUG_ABORT, format , ## args) -#define TAG_PRINTK(format, args...) \ - dprint(NDEBUG_TAGS, format , ## args) -#define MER_PRINTK(format, args...) 
\ - dprint(NDEBUG_MERGING, format , ## args) - -/* conditional macros for NCR5380_print_{,phase,status} */ - -#define NCR_PRINT(mask) \ - ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0) - -#define NCR_PRINT_PHASE(mask) \ - ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0) - -#define NCR_PRINT_STATUS(mask) \ - ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0) - - #endif /* ndef ASM */ #endif /* ATARI_SCSI_H */ diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index 1bfb0bd01198..860f527d8f26 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -83,9 +83,20 @@ static inline void queue_tail_inc(struct be_queue_info *q) /*ISCSI */ +struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ + bool enable; + u32 min_eqd; /* in usecs */ + u32 max_eqd; /* in usecs */ + u32 prev_eqd; /* in usecs */ + u32 et_eqd; /* configured val when aic is off */ + ulong jiffs; + u64 eq_prev; /* Used to calculate eqe */ +}; + struct be_eq_obj { bool todo_mcc_cq; bool todo_cq; + u32 cq_count; struct be_queue_info q; struct beiscsi_hba *phba; struct be_queue_info *cq; diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 7cf7f99ee442..cc7405c0eca0 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -71,6 +71,7 @@ struct be_mcc_wrb { #define BEISCSI_FW_MBX_TIMEOUT 100 /* MBOX Command VER */ +#define MBX_CMD_VER1 0x01 #define MBX_CMD_VER2 0x02 struct be_mcc_compl { @@ -271,6 +272,12 @@ struct be_cmd_resp_eq_create { u16 rsvd0; /* sword */ } __packed; +struct be_set_eqd { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +} __packed; + struct mgmt_chap_format { u32 flags; u8 intr_chap_name[256]; @@ -622,7 +629,7 @@ struct be_cmd_req_modify_eq_delay { u32 eq_id; u32 phase; u32 delay_multiplier; - } delay[8]; + } delay[MAX_CPUS]; } __packed; /******************** Get MAC ADDR *******************/ @@ -708,6 +715,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba); void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, + int num); int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint32_t tag, struct be_mcc_wrb **wrb, struct be_dma_mem *mbx_cmd_mem); @@ -1005,6 +1014,26 @@ struct tcp_connect_and_offload_in { u8 rsvd0[3]; } __packed; +struct tcp_connect_and_offload_in_v1 { + struct be_cmd_req_hdr hdr; + struct ip_addr_format ip_address; + u16 tcp_port; + u16 cid; + u16 cq_id; + u16 defq_id; + struct phys_addr dataout_template_pa; + u16 hdr_ring_id; + u16 data_ring_id; + u8 do_offload; + u8 ifd_state; + u8 rsvd0[2]; + u16 tcp_window_size; + u8 tcp_window_scale_count; + u8 rsvd1; + u32 tcp_mss:24; + u8 rsvd2; +} __packed; + struct tcp_connect_and_offload_out { struct be_cmd_resp_hdr hdr; u32 connection_handle; diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index a3df43324c98..fd284ff36ecf 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -1106,7 +1106,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, struct beiscsi_hba *phba = beiscsi_ep->phba; struct tcp_connect_and_offload_out *ptcpcnct_out; struct be_dma_mem nonemb_cmd; - unsigned int tag; + unsigned int tag, req_memsize; int ret = -ENOMEM; beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, @@ -1127,8 +1127,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, (beiscsi_ep->ep_cid)] = ep; beiscsi_ep->cid_vld = 0; + + if (is_chip_be2_be3r(phba)) 
+ req_memsize = sizeof(struct tcp_connect_and_offload_in); + else + req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); + nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, - sizeof(struct tcp_connect_and_offload_in), + req_memsize, &nonemb_cmd.dma); if (nonemb_cmd.va == NULL) { @@ -1139,7 +1145,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, beiscsi_free_ep(beiscsi_ep); return -ENOMEM; } - nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); + nonemb_cmd.size = req_memsize; memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); if (tag <= 0) { diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 0d822297aa80..56467df3d6de 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -599,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) pci_set_drvdata(pcidev, phba); phba->interface_handle = 0xFFFFFFFF; - if (iscsi_host_add(shost, &phba->pcidev->dev)) - goto free_devices; - return phba; - -free_devices: - pci_dev_put(phba->pcidev); - iscsi_host_free(phba->shost); - return NULL; } static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) @@ -2279,6 +2271,7 @@ static int be_iopoll(struct blk_iopoll *iop, int budget) pbe_eq = container_of(iop, struct be_eq_obj, iopoll); ret = beiscsi_process_cq(pbe_eq); + pbe_eq->cq_count += ret; if (ret < budget) { phba = pbe_eq->phba; blk_iopoll_complete(iop); @@ -3692,7 +3685,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct hwi_async_pdu_context *pasync_ctx; - int i, eq_num, ulp_num; + int i, eq_for_mcc, ulp_num; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; @@ -3729,16 +3722,17 @@ static void hwi_cleanup(struct beiscsi_hba *phba) if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); } + + be_mcc_queues_destroy(phba); if (phba->msix_enabled) - eq_num = 1; + eq_for_mcc = 1; else - eq_num = 0; - for (i = 0; i < (phba->num_cpus + eq_num); i++) { + eq_for_mcc = 0; + for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { q = &phwi_context->be_eq[i].q; if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } - be_mcc_queues_destroy(phba); be_cmd_fw_uninit(ctrl); } @@ -3833,9 +3827,9 @@ static int hwi_init_port(struct beiscsi_hba *phba) phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; - phwi_context->max_eqd = 0; + phwi_context->max_eqd = 128; phwi_context->min_eqd = 0; - phwi_context->cur_eqd = 64; + phwi_context->cur_eqd = 0; be_cmd_fw_initialize(&phba->ctrl); status = beiscsi_create_eqs(phba, phwi_context); @@ -4204,6 +4198,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) kfree(phba->ep_array); phba->ep_array = NULL; ret = -ENOMEM; + + goto free_memory; } for (i = 0; i < phba->params.cxns_per_ctrl; i++) { @@ -5290,6 +5286,57 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba) return; } +static void be_eqd_update(struct beiscsi_hba *phba) +{ + struct be_set_eqd set_eqd[MAX_CPUS]; + struct be_aic_obj *aic; + struct be_eq_obj *pbe_eq; + struct hwi_controller *phwi_ctrlr; + struct hwi_context_memory *phwi_context; + int eqd, i, num = 0; + ulong now; + u32 pps, delta; + unsigned int tag; + + phwi_ctrlr = phba->phwi_ctrlr; + phwi_context = phwi_ctrlr->phwi_ctxt; + + for (i = 0; i <= phba->num_cpus; i++) { + aic = &phba->aic_obj[i]; + pbe_eq = &phwi_context->be_eq[i]; + now = jiffies; + if (!aic->jiffs || time_before(now, 
aic->jiffs) || + pbe_eq->cq_count < aic->eq_prev) { + aic->jiffs = now; + aic->eq_prev = pbe_eq->cq_count; + continue; + } + delta = jiffies_to_msecs(now - aic->jiffs); + pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); + eqd = (pps / 1500) << 2; + + if (eqd < 8) + eqd = 0; + eqd = min_t(u32, eqd, phwi_context->max_eqd); + eqd = max_t(u32, eqd, phwi_context->min_eqd); + + aic->jiffs = now; + aic->eq_prev = pbe_eq->cq_count; + + if (eqd != aic->prev_eqd) { + set_eqd[num].delay_multiplier = (eqd * 65)/100; + set_eqd[num].eq_id = pbe_eq->q.id; + aic->prev_eqd = eqd; + num++; + } + } + if (num) { + tag = be_cmd_modify_eq_delay(phba, set_eqd, num); + if (tag) + beiscsi_mccq_compl(phba, tag, NULL, NULL); + } +} + /* * beiscsi_hw_health_check()- Check adapter health * @work: work item to check HW health @@ -5303,6 +5350,8 @@ beiscsi_hw_health_check(struct work_struct *work) container_of(work, struct beiscsi_hba, beiscsi_hw_check_task.work); + be_eqd_update(phba); + beiscsi_ue_detect(phba); schedule_delayed_work(&phba->beiscsi_hw_check_task, @@ -5579,7 +5628,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, phba->ctrl.mcc_numtag[i + 1] = 0; phba->ctrl.mcc_tag_available++; memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, - sizeof(struct beiscsi_mcc_tag_state)); + sizeof(struct be_dma_mem)); } phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; @@ -5621,6 +5670,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, } hwi_enable_intr(phba); + if (iscsi_host_add(phba->shost, &phba->pcidev->dev)) + goto free_blkenbld; + if (beiscsi_setup_boot_info(phba)) /* * log error but continue, because we may not be using diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 9380b55bdeaf..9ceab426eec9 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -36,7 +36,7 @@ #include <scsi/scsi_transport_iscsi.h> #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.2.125.0" +#define BUILD_STR "10.2.273.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -71,8 +71,8 @@ #define BEISCSI_SGLIST_ELEMENTS 30 -#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ -#define BEISCSI_MAX_SECTORS 2048 /* scsi_host->max_sectors */ +#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */ +#define BEISCSI_MAX_SECTORS 1024 /* scsi_host->max_sectors */ #define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */ #define BEISCSI_MAX_CMD_LEN 16 /* scsi_host->max_cmd_len */ @@ -427,6 +427,7 @@ struct beiscsi_hba { struct mgmt_session_info boot_sess; struct invalidate_command_table inv_tbl[128]; + struct be_aic_obj aic_obj[MAX_CPUS]; unsigned int attr_log_enable; int (*iotask_fn)(struct iscsi_task *, struct scatterlist *sg, diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 088bdf752cfa..07934b0b9ee1 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -155,6 +155,43 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba) } } +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, + struct be_set_eqd *set_eqd, int num) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_cmd_req_modify_eq_delay *req; + unsigned int tag = 0; + int i; + + spin_lock(&ctrl->mbox_lock); + tag = alloc_mcc_tag(phba); + if (!tag) { + spin_unlock(&ctrl->mbox_lock); + return tag; + } + + wrb = wrb_from_mccq(phba); + req = embedded_payload(wrb); + + wrb->tag0 |= tag; + 
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); + + req->num_eq = cpu_to_le32(num); + for (i = 0; i < num; i++) { + req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); + req->delay[i].phase = 0; + req->delay[i].delay_multiplier = + cpu_to_le32(set_eqd[i].delay_multiplier); + } + + be_mcc_notify(phba); + spin_unlock(&ctrl->mbox_lock); + return tag; +} + /** * mgmt_reopen_session()- Reopen a session based on reopen_type * @phba: Device priv structure instance @@ -447,8 +484,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd) { struct be_cmd_resp_hdr *resp; - struct be_mcc_wrb *wrb = wrb_from_mccq(phba); - struct be_sge *mcc_sge = nonembedded_sgl(wrb); + struct be_mcc_wrb *wrb; + struct be_sge *mcc_sge; unsigned int tag = 0; struct iscsi_bsg_request *bsg_req = job->request; struct be_bsg_vendor_cmd *req = nonemb_cmd->va; @@ -465,7 +502,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, req->sector = sector; req->offset = offset; spin_lock(&ctrl->mbox_lock); - memset(wrb, 0, sizeof(*wrb)); switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case BEISCSI_WRITE_FLASH: @@ -495,6 +531,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, return tag; } + wrb = wrb_from_mccq(phba); + mcc_sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, job->request_payload.sg_cnt); mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); @@ -525,7 +563,6 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num) int status = 0; spin_lock(&ctrl->mbox_lock); - memset(wrb, 0, sizeof(*wrb)); be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, @@ -675,7 +712,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba, struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr; struct be_ctrl_info *ctrl = &phba->ctrl; struct be_mcc_wrb *wrb; - struct tcp_connect_and_offload_in *req; + struct tcp_connect_and_offload_in_v1 *req; unsigned short def_hdr_id; unsigned short def_data_id; struct phys_addr template_address = { 0, 0 }; @@ -702,17 +739,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba, return tag; } wrb = wrb_from_mccq(phba); - memset(wrb, 0, sizeof(*wrb)); sge = nonembedded_sgl(wrb); req = nonemb_cmd->va; memset(req, 0, sizeof(*req)); wrb->tag0 |= tag; - be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); + be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1); be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, - sizeof(*req)); + nonemb_cmd->size); if (dst_addr->sa_family == PF_INET) { __be32 s_addr = daddr_in->sin_addr.s_addr; req->ip_address.ip_type = BE2_IPV4; @@ -758,6 +794,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba, sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(nonemb_cmd->size); + + if (!is_chip_be2_be3r(phba)) { + req->hdr.version = MBX_CMD_VER1; + req->tcp_window_size = 0; + req->tcp_window_scale_count = 2; + } + be_mcc_notify(phba); spin_unlock(&ctrl->mbox_lock); return tag; @@ -804,7 +847,7 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, int resp_buf_len) { struct be_ctrl_info *ctrl = &phba->ctrl; - struct be_mcc_wrb *wrb = wrb_from_mccq(phba); + struct be_mcc_wrb *wrb; struct be_sge *sge; unsigned int tag; int rc = 0; @@ -816,7 +859,8 @@ static int 
mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba, rc = -ENOMEM; goto free_cmd; } - memset(wrb, 0, sizeof(*wrb)); + + wrb = wrb_from_mccq(phba); wrb->tag0 |= tag; sge = nonembedded_sgl(wrb); @@ -964,10 +1008,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba, BE2_IPV6 : BE2_IPV4 ; rc = mgmt_get_if_info(phba, ip_type, &if_info); - if (rc) { - kfree(if_info); + if (rc) return rc; - } if (boot_proto == ISCSI_BOOTPROTO_DHCP) { if (if_info->dhcp_state) { diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 01b8c97284c0..24a8fc577477 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -335,5 +335,7 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params, void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle); void beiscsi_ue_detect(struct beiscsi_hba *phba); +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, + struct be_set_eqd *, int num); #endif diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index cc0fbcdc5192..7593b7c1d336 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -507,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport) struct bfad_vport_s *vport; int rc; - vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); + vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC); if (!vport) { bfa_trc(bfad, 0); return; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 1d41f4b9114f..785d0d71781e 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -464,7 +464,7 @@ static int bnx2fc_l2_rcv_thread(void *arg) struct fcoe_percpu_s *bg = arg; struct sk_buff *skb; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); @@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); - stats = per_cpu_ptr(lport->stats, get_cpu()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; - fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { - put_cpu(); kfree_skb(skb); return; } fr_eof(fp) = crc_eof.fcoe_eof; fr_crc(fp) = crc_eof.fcoe_crc32; if (pskb_trim(skb, fr_len)) { - put_cpu(); kfree_skb(skb); return; } @@ -544,7 +538,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) port = lport_priv(vn_port); if (!ether_addr_equal(port->data_src_addr, dest_mac)) { BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); - put_cpu(); kfree_skb(skb); return; } @@ -552,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) { /* Drop FCP data. 
We dont this in L2 path */ - put_cpu(); kfree_skb(skb); return; } @@ -562,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) case ELS_LOGO: if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { /* drop non-FIP LOGO */ - put_cpu(); kfree_skb(skb); return; } @@ -572,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { /* Drop incoming ABTS */ - put_cpu(); kfree_skb(skb); return; } + stats = per_cpu_ptr(lport->stats, smp_processor_id()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + if (le32_to_cpu(fr_crc(fp)) != ~crc32(~0, skb->data, fr_len)) { if (stats->InvalidCRCCount < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); stats->InvalidCRCCount++; - put_cpu(); kfree_skb(skb); return; } - put_cpu(); fc_exch_recv(lport, fp); } @@ -602,7 +594,7 @@ int bnx2fc_percpu_io_thread(void *arg) struct bnx2fc_work *work, *tmp; LIST_HEAD(work_list); - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 46a37657307f..512aed3ae4f1 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1966,26 +1966,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) { int i; int segment_count; - int hash_table_size; u32 *pbl; - segment_count = hba->hash_tbl_segment_count; - hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * - sizeof(struct fcoe_hash_table_entry); + if (hba->hash_tbl_segments) { - pbl = hba->hash_tbl_pbl; - for (i = 0; i < segment_count; ++i) { - dma_addr_t dma_address; + pbl = hba->hash_tbl_pbl; + if (pbl) { + segment_count = hba->hash_tbl_segment_count; + for (i = 0; i < segment_count; ++i) { + dma_addr_t dma_address; - dma_address = le32_to_cpu(*pbl); - ++pbl; - dma_address += ((u64)le32_to_cpu(*pbl)) << 32; - ++pbl; - dma_free_coherent(&hba->pcidev->dev, - BNX2FC_HASH_TBL_CHUNK_SIZE, - hba->hash_tbl_segments[i], - dma_address); + dma_address = le32_to_cpu(*pbl); + ++pbl; + dma_address += ((u64)le32_to_cpu(*pbl)) << 32; + ++pbl; + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_address); + } + } + kfree(hba->hash_tbl_segments); + hba->hash_tbl_segments = NULL; } if (hba->hash_tbl_pbl) { @@ -2023,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); if (!dma_segment_array) { printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); - return -ENOMEM; + goto cleanup_ht; } for (i = 0; i < segment_count; ++i) { @@ -2034,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) GFP_KERNEL); if (!hba->hash_tbl_segments[i]) { printk(KERN_ERR PFX "hash segment alloc failed\n"); - while (--i >= 0) { - dma_free_coherent(&hba->pcidev->dev, - BNX2FC_HASH_TBL_CHUNK_SIZE, - hba->hash_tbl_segments[i], - dma_segment_array[i]); - hba->hash_tbl_segments[i] = NULL; - } - kfree(dma_segment_array); - return -ENOMEM; + goto cleanup_dma; } memset(hba->hash_tbl_segments[i], 0, BNX2FC_HASH_TBL_CHUNK_SIZE); @@ -2054,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) GFP_KERNEL); if (!hba->hash_tbl_pbl) { printk(KERN_ERR PFX "hash table pbl alloc failed\n"); - kfree(dma_segment_array); - return -ENOMEM; + goto cleanup_dma; } memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); @@ -2080,6 +2074,22 @@ static int 
bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) } kfree(dma_segment_array); return 0; + +cleanup_dma: + for (i = 0; i < segment_count; ++i) { + if (hba->hash_tbl_segments[i]) + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_segment_array[i]); + } + + kfree(dma_segment_array); + +cleanup_ht: + kfree(hba->hash_tbl_segments); + hba->hash_tbl_segments = NULL; + return -ENOMEM; } /** diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 32a5e0a2a669..7bc47fc7c686 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) arr_sz, GFP_KERNEL); if (!cmgr->free_list_lock) { printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); + kfree(cmgr->free_list); + cmgr->free_list = NULL; goto mem_err; } diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index b5ffd280a1ae..d6d491c2f004 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1870,7 +1870,7 @@ int bnx2i_percpu_io_thread(void *arg) struct bnx2i_work *work, *tmp; LIST_HEAD(work_list); - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); while (!kthread_should_stop()) { spin_lock_bh(&p->p_work_lock); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index e8ee5e5fe0ef..79788a12712d 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -19,6 +19,7 @@ #include <net/tcp.h> #include <net/dst.h> #include <linux/netdevice.h> +#include <net/addrconf.h> #include "t4_regs.h" #include "t4_msg.h" @@ -150,6 +151,7 @@ static struct scsi_transport_template *cxgb4i_stt; * The section below implments CPLs that related to iscsi tcp connection * open/close/abort and data send/receive. */ + #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #define RCV_BUFSIZ_MASK 0x3FFU #define MAX_IMM_TX_PKT_LEN 128 @@ -179,6 +181,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, struct l2t_entry *e) { struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); + int t4 = is_t4(lldi->adapter_type); int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); unsigned long long opt0; unsigned int opt2; @@ -248,6 +251,97 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, } set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); + + pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n", + (&csk->saddr), (&csk->daddr), t4 ? 
4 : 5, csk, + csk->state, csk->flags, csk->atid, csk->rss_qid); + + cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); +} + +static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, + struct l2t_entry *e) +{ + struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); + int t4 = is_t4(lldi->adapter_type); + int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); + unsigned long long opt0; + unsigned int opt2; + unsigned int qid_atid = ((unsigned int)csk->atid) | + (((unsigned int)csk->rss_qid) << 14); + + opt0 = KEEP_ALIVE(1) | + WND_SCALE(wscale) | + MSS_IDX(csk->mss_idx) | + L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) | + TX_CHAN(csk->tx_chan) | + SMAC_SEL(csk->smac_idx) | + ULP_MODE(ULP_MODE_ISCSI) | + RCV_BUFSIZ(cxgb4i_rcv_win >> 10); + + opt2 = RX_CHANNEL(0) | + RSS_QUEUE_VALID | + RX_FC_DISABLE | + RSS_QUEUE(csk->rss_qid); + + if (t4) { + struct cpl_act_open_req6 *req = + (struct cpl_act_open_req6 *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, + qid_atid)); + req->local_port = csk->saddr6.sin6_port; + req->peer_port = csk->daddr6.sin6_port; + + req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); + req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + + 8); + req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); + req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + + 8); + + req->opt0 = cpu_to_be64(opt0); + + opt2 |= RX_FC_VALID; + req->opt2 = cpu_to_be32(opt2); + + req->params = cpu_to_be32(cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t)); + } else { + struct cpl_t5_act_open_req6 *req = + (struct cpl_t5_act_open_req6 *)skb->head; + + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, + qid_atid)); + req->local_port = csk->saddr6.sin6_port; + req->peer_port = csk->daddr6.sin6_port; + req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); + req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + + 8); + req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); + req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + + 8); + req->opt0 = cpu_to_be64(opt0); + + opt2 |= T5_OPT_2_VALID; + req->opt2 = cpu_to_be32(opt2); + + req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple( + csk->cdev->ports[csk->port_id], + csk->l2t))); + } + + set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); + + pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n", + t4 ? 
4 : 5, csk, csk->state, csk->flags, csk->atid, + &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), + &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), + csk->rss_qid); + cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); } @@ -586,9 +680,11 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb) goto rel_skb; } - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n", - csk, csk->state, csk->flags, tid, atid, rcv_isn); + pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n", + (&csk->saddr), (&csk->daddr), + atid, tid, csk, csk->state, csk->flags, rcv_isn); + + module_put(THIS_MODULE); cxgbi_sock_get(csk); csk->tid = tid; @@ -663,6 +759,9 @@ static void csk_act_open_retry_timer(unsigned long data) struct sk_buff *skb; struct cxgbi_sock *csk = (struct cxgbi_sock *)data; struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); + void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, + struct l2t_entry *); + int t4 = is_t4(lldi->adapter_type), size, size6; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", @@ -670,20 +769,35 @@ static void csk_act_open_retry_timer(unsigned long data) cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); - skb = alloc_wr(is_t4(lldi->adapter_type) ? - sizeof(struct cpl_act_open_req) : - sizeof(struct cpl_t5_act_open_req), - 0, GFP_ATOMIC); + + if (t4) { + size = sizeof(struct cpl_act_open_req); + size6 = sizeof(struct cpl_act_open_req6); + } else { + size = sizeof(struct cpl_t5_act_open_req); + size6 = sizeof(struct cpl_t5_act_open_req6); + } + + if (csk->csk_family == AF_INET) { + send_act_open_func = send_act_open_req; + skb = alloc_wr(size, 0, GFP_ATOMIC); + } else { + send_act_open_func = send_act_open_req6; + skb = alloc_wr(size6, 0, GFP_ATOMIC); + } + if (!skb) cxgbi_sock_fail_act_open(csk, -ENOMEM); else { skb->sk = (struct sock *)csk; t4_set_arp_err_handler(skb, csk, - cxgbi_sock_act_open_req_arp_failure); - send_act_open_req(csk, skb, csk->l2t); + cxgbi_sock_act_open_req_arp_failure); + send_act_open_func(csk, skb, csk->l2t); } + spin_unlock_bh(&csk->lock); cxgbi_sock_put(csk); + } static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) @@ -703,10 +817,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) goto rel_skb; } - pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n", - &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), - &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), - atid, tid, status, csk, csk->state, csk->flags); + pr_info_ipaddr("tid %u/%u, status %u.\n" + "csk 0x%p,%u,0x%lx. 
", (&csk->saddr), (&csk->daddr), + atid, tid, status, csk, csk->state, csk->flags); if (status == CPL_ERR_RTX_NEG_ADVICE) goto rel_skb; @@ -746,9 +859,9 @@ static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb) pr_err("can't find connection for tid %u.\n", tid); goto rel_skb; } - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "csk 0x%p,%u,0x%lx,%u.\n", - csk, csk->state, csk->flags, csk->tid); + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", + (&csk->saddr), (&csk->daddr), + csk, csk->state, csk->flags, csk->tid); cxgbi_sock_rcv_peer_close(csk); rel_skb: __kfree_skb(skb); @@ -767,9 +880,9 @@ static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) pr_err("can't find connection for tid %u.\n", tid); goto rel_skb; } - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "csk 0x%p,%u,0x%lx,%u.\n", - csk, csk->state, csk->flags, csk->tid); + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n", + (&csk->saddr), (&csk->daddr), + csk, csk->state, csk->flags, csk->tid); cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); rel_skb: __kfree_skb(skb); @@ -808,9 +921,9 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb) goto rel_skb; } - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n", - csk, csk->state, csk->flags, csk->tid, req->status); + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", + (&csk->saddr), (&csk->daddr), + csk, csk->state, csk->flags, csk->tid, req->status); if (req->status == CPL_ERR_RTX_NEG_ADVICE || req->status == CPL_ERR_PERSIST_NEG_ADVICE) @@ -851,10 +964,10 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb) if (!csk) goto rel_skb; - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "status 0x%x, csk 0x%p, s %u, 0x%lx.\n", - rpl->status, csk, csk ? csk->state : 0, - csk ? csk->flags : 0UL); + if (csk) + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n", + (&csk->saddr), (&csk->daddr), csk, + csk->state, csk->flags, csk->tid, rpl->status); if (rpl->status == CPL_ERR_ABORT_FAILED) goto rel_skb; @@ -1163,15 +1276,35 @@ static int init_act_open(struct cxgbi_sock *csk) struct cxgbi_device *cdev = csk->cdev; struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct net_device *ndev = cdev->ports[csk->port_id]; - struct port_info *pi = netdev_priv(ndev); struct sk_buff *skb = NULL; - struct neighbour *n; + struct neighbour *n = NULL; + void *daddr; unsigned int step; + unsigned int size, size6; + int t4 = is_t4(lldi->adapter_type); log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", csk, csk->state, csk->flags, csk->tid); + if (csk->csk_family == AF_INET) + daddr = &csk->daddr.sin_addr.s_addr; +#if IS_ENABLED(CONFIG_IPV6) + else if (csk->csk_family == AF_INET6) + daddr = &csk->daddr6.sin6_addr; +#endif + else { + pr_err("address family 0x%x not supported\n", csk->csk_family); + goto rel_resource; + } + + n = dst_neigh_lookup(csk->dst, daddr); + + if (!n) { + pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); + goto rel_resource; + } + csk->atid = cxgb4_alloc_atid(lldi->tids, csk); if (csk->atid < 0) { pr_err("%s, NO atid available.\n", ndev->name); @@ -1192,10 +1325,19 @@ static int init_act_open(struct cxgbi_sock *csk) } cxgbi_sock_get(csk); - skb = alloc_wr(is_t4(lldi->adapter_type) ? 
- sizeof(struct cpl_act_open_req) : - sizeof(struct cpl_t5_act_open_req), - 0, GFP_ATOMIC); + if (t4) { + size = sizeof(struct cpl_act_open_req); + size6 = sizeof(struct cpl_act_open_req6); + } else { + size = sizeof(struct cpl_t5_act_open_req); + size6 = sizeof(struct cpl_t5_act_open_req6); + } + + if (csk->csk_family == AF_INET) + skb = alloc_wr(size, 0, GFP_NOIO); + else + skb = alloc_wr(size6, 0, GFP_NOIO); + if (!skb) goto rel_resource; skb->sk = (struct sock *)csk; @@ -1211,19 +1353,27 @@ static int init_act_open(struct cxgbi_sock *csk) csk->txq_idx = cxgb4_port_idx(ndev) * step; step = lldi->nrxq / lldi->nchan; csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step]; - csk->wr_max_cred = csk->wr_cred = lldi->wr_cred; + csk->wr_cred = lldi->wr_cred - + DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16); + csk->wr_max_cred = csk->wr_cred; csk->wr_una_cred = 0; cxgbi_sock_reset_wr_list(csk); csk->err = 0; - log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, - "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n", - csk, pi->port_id, ndev->name, csk->tx_chan, - csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx, - csk->smac_idx); + pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n", + (&csk->saddr), (&csk->daddr), csk, csk->state, + csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, + csk->mtu, csk->mss_idx, csk->smac_idx); + + /* must wait for either an act_open_rpl or act_open_establish */ + try_module_get(THIS_MODULE); cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); - send_act_open_req(csk, skb, csk->l2t); + if (csk->csk_family == AF_INET) + send_act_open_req(csk, skb, csk->l2t); + else + send_act_open_req6(csk, skb, csk->l2t); neigh_release(n); + return 0; rel_resource: @@ -1234,8 +1384,6 @@ rel_resource: return -EINVAL; } -#define CPL_ISCSI_DATA 0xB2 -#define CPL_RX_ISCSI_DDP 0x49 cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = { [CPL_ACT_ESTABLISH] = do_act_establish, [CPL_ACT_OPEN_RPL] = do_act_open_rpl, @@ -1487,6 +1635,129 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev) return 0; } +#if IS_ENABLED(CONFIG_IPV6) +static int cxgbi_inet6addr_handler(struct notifier_block *this, + unsigned long event, void *data) +{ + struct inet6_ifaddr *ifa = data; + struct net_device *event_dev = ifa->idev->dev; + struct cxgbi_device *cdev; + int ret = NOTIFY_DONE; + + if (event_dev->priv_flags & IFF_802_1Q_VLAN) + event_dev = vlan_dev_real_dev(event_dev); + + cdev = cxgbi_device_find_by_netdev(event_dev, NULL); + + if (!cdev) + return ret; + + switch (event) { + case NETDEV_UP: + ret = cxgb4_clip_get(event_dev, + (const struct in6_addr *) + ((ifa)->addr.s6_addr)); + if (ret < 0) + return ret; + + ret = NOTIFY_OK; + break; + + case NETDEV_DOWN: + cxgb4_clip_release(event_dev, + (const struct in6_addr *) + ((ifa)->addr.s6_addr)); + ret = NOTIFY_OK; + break; + + default: + break; + } + + return ret; +} + +static struct notifier_block cxgbi_inet6addr_notifier = { + .notifier_call = cxgbi_inet6addr_handler +}; + +/* Retrieve IPv6 addresses from a root device (bond, vlan) associated with * a physical device. * The physical device reference is needed to send the actual CLIP command. 
+ */ +static int update_dev_clip(struct net_device *root_dev, struct net_device *dev) +{ + struct inet6_dev *idev = NULL; + struct inet6_ifaddr *ifa; + int ret = 0; + + idev = __in6_dev_get(root_dev); + if (!idev) + return ret; + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + pr_info("updating the clip for addr %pI6\n", + ifa->addr.s6_addr); + ret = cxgb4_clip_get(dev, (const struct in6_addr *) + ifa->addr.s6_addr); + if (ret < 0) + break; + } + + read_unlock_bh(&idev->lock); + return ret; +} + +static int update_root_dev_clip(struct net_device *dev) +{ + struct net_device *root_dev = NULL; + int i, ret = 0; + + /* First populate the real net device's IPv6 address */ + ret = update_dev_clip(dev, dev); + if (ret) + return ret; + + /* Parse all bond and vlan devices layered on top of the physical dev */ + root_dev = netdev_master_upper_dev_get(dev); + if (root_dev) { + ret = update_dev_clip(root_dev, dev); + if (ret) + return ret; + } + + for (i = 0; i < VLAN_N_VID; i++) { + root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i); + if (!root_dev) + continue; + + ret = update_dev_clip(root_dev, dev); + if (ret) + break; + } + return ret; +} + +static void cxgbi_update_clip(struct cxgbi_device *cdev) +{ + int i; + + rcu_read_lock(); + + for (i = 0; i < cdev->nports; i++) { + struct net_device *dev = cdev->ports[i]; + int ret = 0; + + if (dev) + ret = update_root_dev_clip(dev); + if (ret < 0) + break; + } + rcu_read_unlock(); +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + static void *t4_uld_add(const struct cxgb4_lld_info *lldi) { struct cxgbi_device *cdev; @@ -1605,6 +1876,9 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state) switch (state) { case CXGB4_STATE_UP: pr_info("cdev 0x%p, UP.\n", cdev); +#if IS_ENABLED(CONFIG_IPV6) + cxgbi_update_clip(cdev); +#endif /* re-initialize */ break; case CXGB4_STATE_START_RECOVERY: @@ -1635,11 +1909,18 @@ static int __init cxgb4i_init_module(void) if (rc < 0) return rc; cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); + +#if IS_ENABLED(CONFIG_IPV6) + register_inet6addr_notifier(&cxgbi_inet6addr_notifier); +#endif return 0; } static void __exit cxgb4i_exit_module(void) { +#if IS_ENABLED(CONFIG_IPV6) + unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier); +#endif cxgb4_unregister_uld(CXGB4_ULD_ISCSI); cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4); cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt); diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index b44c1cff3114..3d5322d59f15 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -24,6 +24,10 @@ #include <linux/inet.h> #include <net/dst.h> #include <net/route.h> +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/addrconf.h> + #include <linux/inetdevice.h> /* ip_dev_find */ #include <linux/module.h> #include <net/tcp.h> @@ -193,8 +197,8 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev) } EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev); -static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, - int *port) +struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, + int *port) { struct net_device *vdev = NULL; struct cxgbi_device *cdev, *tmp; @@ -224,6 +228,40 @@ static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev, "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name); return NULL; } +EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev); + +static struct cxgbi_device 
*cxgbi_device_find_by_mac(struct net_device *ndev, + int *port) +{ + struct net_device *vdev = NULL; + struct cxgbi_device *cdev, *tmp; + int i; + + if (ndev->priv_flags & IFF_802_1Q_VLAN) { + vdev = ndev; + ndev = vlan_dev_real_dev(ndev); + pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name); + } + + mutex_lock(&cdev_mutex); + list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) { + for (i = 0; i < cdev->nports; i++) { + if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr, + MAX_ADDR_LEN)) { + cdev->hbas[i]->vdev = vdev; + mutex_unlock(&cdev_mutex); + if (port) + *port = i; + return cdev; + } + } + } + mutex_unlock(&cdev_mutex); + log_debug(1 << CXGBI_DBG_DEV, + "ndev 0x%p, %s, NO match mac found.\n", + ndev, ndev->name); + return NULL; +} void cxgbi_hbas_remove(struct cxgbi_device *cdev) { @@ -320,6 +358,7 @@ static int sock_get_port(struct cxgbi_sock *csk) struct cxgbi_ports_map *pmap = &cdev->pmap; unsigned int start; int idx; + __be16 *port; if (!pmap->max_connect) { pr_err("cdev 0x%p, p#%u %s, NO port map.\n", @@ -327,9 +366,14 @@ static int sock_get_port(struct cxgbi_sock *csk) return -EADDRNOTAVAIL; } - if (csk->saddr.sin_port) { + if (csk->csk_family == AF_INET) + port = &csk->saddr.sin_port; + else /* ipv6 */ + port = &csk->saddr6.sin6_port; + + if (*port) { pr_err("source port NON-ZERO %u.\n", - ntohs(csk->saddr.sin_port)); + ntohs(*port)); return -EADDRINUSE; } @@ -347,8 +391,7 @@ static int sock_get_port(struct cxgbi_sock *csk) idx = 0; if (!pmap->port_csk[idx]) { pmap->used++; - csk->saddr.sin_port = - htons(pmap->sport_base + idx); + *port = htons(pmap->sport_base + idx); pmap->next = idx; pmap->port_csk[idx] = csk; spin_unlock_bh(&pmap->lock); @@ -374,16 +417,22 @@ static void sock_put_port(struct cxgbi_sock *csk) { struct cxgbi_device *cdev = csk->cdev; struct cxgbi_ports_map *pmap = &cdev->pmap; + __be16 *port; - if (csk->saddr.sin_port) { - int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base; + if (csk->csk_family == AF_INET) + port = &csk->saddr.sin_port; + else /* ipv6 */ + port = &csk->saddr6.sin6_port; - csk->saddr.sin_port = 0; + if (*port) { + int idx = ntohs(*port) - pmap->sport_base; + + *port = 0; if (idx < 0 || idx >= pmap->max_connect) { pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n", cdev, csk->port_id, cdev->ports[csk->port_id]->name, - ntohs(csk->saddr.sin_port)); + ntohs(*port)); return; } @@ -479,17 +528,11 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) int port = 0xFFFF; int err = 0; - if (daddr->sin_family != AF_INET) { - pr_info("address family 0x%x NOT supported.\n", - daddr->sin_family); - err = -EAFNOSUPPORT; - goto err_out; - } - rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0); if (!rt) { pr_info("no route to ipv4 0x%x, port %u.\n", - daddr->sin_addr.s_addr, daddr->sin_port); + be32_to_cpu(daddr->sin_addr.s_addr), + be16_to_cpu(daddr->sin_port)); err = -ENETUNREACH; goto err_out; } @@ -537,9 +580,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) csk->port_id = port; csk->mtu = mtu; csk->dst = dst; + + csk->csk_family = AF_INET; csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; csk->daddr.sin_port = daddr->sin_port; csk->daddr.sin_family = daddr->sin_family; + csk->saddr.sin_family = daddr->sin_family; csk->saddr.sin_addr.s_addr = fl4.saddr; neigh_release(n); @@ -556,6 +602,123 @@ err_out: return ERR_PTR(err); } +#if IS_ENABLED(CONFIG_IPV6) +static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr, + const struct in6_addr *daddr) +{ + struct 
flowi6 fl; + + if (saddr) + memcpy(&fl.saddr, saddr, sizeof(struct in6_addr)); + if (daddr) + memcpy(&fl.daddr, daddr, sizeof(struct in6_addr)); + return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl); +} + +static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr) +{ + struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr; + struct dst_entry *dst; + struct net_device *ndev; + struct cxgbi_device *cdev; + struct rt6_info *rt = NULL; + struct neighbour *n; + struct in6_addr pref_saddr; + struct cxgbi_sock *csk = NULL; + unsigned int mtu = 0; + int port = 0xFFFF; + int err = 0; + + rt = find_route_ipv6(NULL, &daddr6->sin6_addr); + + if (!rt) { + pr_info("no route to ipv6 %pI6 port %u\n", + daddr6->sin6_addr.s6_addr, + be16_to_cpu(daddr6->sin6_port)); + err = -ENETUNREACH; + goto err_out; + } + + dst = &rt->dst; + + n = dst_neigh_lookup(dst, &daddr6->sin6_addr); + + if (!n) { + pr_info("%pI6, port %u, dst no neighbour.\n", + daddr6->sin6_addr.s6_addr, + be16_to_cpu(daddr6->sin6_port)); + err = -ENETUNREACH; + goto rel_rt; + } + ndev = n->dev; + + if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { + pr_info("multi-cast route %pI6 port %u, dev %s.\n", + daddr6->sin6_addr.s6_addr, + ntohs(daddr6->sin6_port), ndev->name); + err = -ENETUNREACH; + goto rel_rt; + } + + cdev = cxgbi_device_find_by_netdev(ndev, &port); + if (!cdev) + cdev = cxgbi_device_find_by_mac(ndev, &port); + if (!cdev) { + pr_info("dst %pI6 %s, NOT cxgbi device.\n", + daddr6->sin6_addr.s6_addr, ndev->name); + err = -ENETUNREACH; + goto rel_rt; + } + log_debug(1 << CXGBI_DBG_SOCK, + "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n", + daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port, + ndev->name, cdev); + + csk = cxgbi_sock_create(cdev); + if (!csk) { + err = -ENOMEM; + goto rel_rt; + } + csk->cdev = cdev; + csk->port_id = port; + csk->mtu = mtu; + csk->dst = dst; + + if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) { + struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt); + + err = ipv6_dev_get_saddr(&init_net, idev ? 
idev->dev : NULL, + &daddr6->sin6_addr, 0, &pref_saddr); + if (err) { + pr_info("failed to get source address to reach %pI6\n", + &daddr6->sin6_addr); + goto rel_rt; + } + } else { + pref_saddr = rt->rt6i_prefsrc.addr; + } + + csk->csk_family = AF_INET6; + csk->daddr6.sin6_addr = daddr6->sin6_addr; + csk->daddr6.sin6_port = daddr6->sin6_port; + csk->daddr6.sin6_family = daddr6->sin6_family; + csk->saddr6.sin6_addr = pref_saddr; + + neigh_release(n); + return csk; + +rel_rt: + if (n) + neigh_release(n); + + ip6_rt_put(rt); + if (csk) + cxgbi_sock_closed(csk); +err_out: + return ERR_PTR(err); +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, unsigned int opt) { @@ -2194,6 +2357,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, } EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); +static inline int csk_print_port(struct cxgbi_sock *csk, char *buf) +{ + int len; + + cxgbi_sock_get(csk); + len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port)); + cxgbi_sock_put(csk); + + return len; +} + +static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf) +{ + int len; + + cxgbi_sock_get(csk); + if (csk->csk_family == AF_INET) + len = sprintf(buf, "%pI4", + &csk->daddr.sin_addr.s_addr); + else + len = sprintf(buf, "%pI6", + &csk->daddr6.sin6_addr); + + cxgbi_sock_put(csk); + + return len; +} + int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { @@ -2447,7 +2638,19 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost, } } - csk = cxgbi_check_route(dst_addr); + if (dst_addr->sa_family == AF_INET) { + csk = cxgbi_check_route(dst_addr); +#if IS_ENABLED(CONFIG_IPV6) + } else if (dst_addr->sa_family == AF_INET6) { + csk = cxgbi_check_route6(dst_addr); +#endif + } else { + pr_info("address family 0x%x NOT supported.\n", + dst_addr->sa_family); + err = -EAFNOSUPPORT; + return (struct iscsi_endpoint *)ERR_PTR(err); + } + if (IS_ERR(csk)) return (struct iscsi_endpoint *)csk; cxgbi_sock_get(csk); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 8135f04671af..8ad73d913f02 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -44,6 +44,15 @@ enum cxgbi_dbg_flag { pr_info(fmt, ##__VA_ARGS__); \ } while (0) +#define pr_info_ipaddr(fmt_trail, \ + addr1, addr2, args_trail...) \ +do { \ + if (!((1 << CXGBI_DBG_SOCK) & dbg_level)) \ + break; \ + pr_info("%pISpc - %pISpc, " fmt_trail, \ + addr1, addr2, args_trail); \ +} while (0) + /* max. 
connections per adapter */ #define CXGBI_MAX_CONN 16384 @@ -202,8 +211,15 @@ struct cxgbi_sock { spinlock_t lock; struct kref refcnt; unsigned int state; - struct sockaddr_in saddr; - struct sockaddr_in daddr; + unsigned int csk_family; + union { + struct sockaddr_in saddr; + struct sockaddr_in6 saddr6; + }; + union { + struct sockaddr_in daddr; + struct sockaddr_in6 daddr6; + }; struct dst_entry *dst; struct sk_buff_head receive_queue; struct sk_buff_head write_queue; @@ -692,6 +708,7 @@ struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); void cxgbi_device_unregister(struct cxgbi_device *); void cxgbi_device_unregister_all(unsigned int flag); struct cxgbi_device *cxgbi_device_find_by_lldev(void *); +struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *); int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int, struct scsi_host_template *, struct scsi_transport_template *); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 5248c888552b..7bcf67eec921 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -120,6 +120,7 @@ static struct request *get_alua_req(struct scsi_device *sdev, "%s: blk_get_request failed\n", __func__); return NULL; } + blk_rq_set_block_pc(rq); if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { blk_put_request(rq); @@ -128,7 +129,6 @@ static struct request *get_alua_req(struct scsi_device *sdev, return NULL; } - rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->retries = ALUA_FAILOVER_RETRIES; diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index e1c8be06de9d..6f07f7fe3aa1 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -280,6 +280,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd, return NULL; } + blk_rq_set_block_pc(rq); rq->cmd_len = COMMAND_SIZE(cmd); rq->cmd[0] = cmd; @@ -304,7 +305,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd, break; } - rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->timeout = CLARIION_TIMEOUT; diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 084062bb8ee9..e9d9fea9e272 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -120,7 +120,7 @@ retry: if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); @@ -250,7 +250,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h) if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; req->cmd_len = COMMAND_SIZE(START_STOP); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 4b9cf93f3fb6..826069db9848 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -279,6 +279,7 @@ static struct request *get_rdac_req(struct scsi_device *sdev, "get_rdac_req: blk_get_request failed.\n"); return NULL; } + 
blk_rq_set_block_pc(rq); if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { blk_put_request(rq); @@ -287,7 +288,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev, return NULL; } - rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->retries = RDAC_RETRIES; diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index eb29fe7eaf49..0a667fe05006 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -3,8 +3,6 @@ #define PSEUDO_DMA #define DONT_USE_INTR #define UNSAFE /* Leave interrupts enabled during pseudo-dma I/O */ -#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\ - NDEBUG_SELECTION+NDEBUG_ARBITRATION) #define DMA_WORKS_RIGHT diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index f37f3e3dd5d5..6504a195c874 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -390,7 +390,7 @@ static int esas2r_probe(struct pci_dev *pcid, esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), "pci_enable_device() OK"); esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), - "after pci_device_enable() enable_cnt: %d", + "after pci_enable_device() enable_cnt: %d", pcid->enable_cnt.counter); host = scsi_host_alloc(&driver_template, host_alloc_size); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index d5e105b173f0..00ee0ed642aa 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1872,7 +1872,7 @@ static int fcoe_percpu_receive_thread(void *arg) skb_queue_head_init(&tmp); - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); retry: while (!kthread_should_stop()) { diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 528d43b7b569..1d3521e13d77 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -39,14 +39,15 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.45" +#define DRV_VERSION "1.6.0.10" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " #define DESC_CLEAN_LOW_WATERMARK 8 #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ -#define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ +#define FNIC_MAX_IO_REQ 1024 /* scsi_cmnd tag map entries */ +#define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ #define FNIC_DFLT_QUEUE_DEPTH 32 #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index b6073f875761..2c613bdea78f 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -25,6 +25,21 @@ static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_enable; static struct dentry *fnic_stats_debugfs_root; +static struct dentry *fnic_fc_trace_debugfs_file; +static struct dentry *fnic_fc_rdata_trace_debugfs_file; +static struct dentry *fnic_fc_trace_enable; +static struct dentry *fnic_fc_trace_clear; + +struct fc_trace_flag_type { + u8 fc_row_file; + u8 fc_normal_file; + u8 fnic_trace; + u8 fc_trace; + u8 fc_clear; +}; + +static struct fc_trace_flag_type *fc_trc_flag; + /* * fnic_debugfs_init - Initialize debugfs for fnic debug logging * @@ -56,6 +71,18 @@ int fnic_debugfs_init(void) return rc; } + /* Allocate memory to structure */ + fc_trc_flag = (struct fc_trace_flag_type *) + vmalloc(sizeof(struct 
fc_trace_flag_type)); + + if (fc_trc_flag) { + fc_trc_flag->fc_row_file = 0; + fc_trc_flag->fc_normal_file = 1; + fc_trc_flag->fnic_trace = 2; + fc_trc_flag->fc_trace = 3; + fc_trc_flag->fc_clear = 4; + } + rc = 0; return rc; } @@ -74,15 +101,19 @@ void fnic_debugfs_terminate(void) debugfs_remove(fnic_trace_debugfs_root); fnic_trace_debugfs_root = NULL; + + if (fc_trc_flag) + vfree(fc_trc_flag); } /* - * fnic_trace_ctrl_open - Open the trace_enable file + * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace, + * or open the fc_trace_enable file for fc_trace * @inode: The inode pointer. * @file: The file pointer to attach the trace enable/disable flag. * * Description: - * This routine opens a debugsfs file trace_enable. + * This routine opens a debugfs file, trace_enable or fc_trace_enable. * * Returns: * This function returns zero if successful. @@ -94,15 +125,19 @@ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp) } /* - * fnic_trace_ctrl_read - Read a trace_enable debugfs file + * fnic_trace_ctrl_read - + * Read the trace_enable, fc_trace_enable + * or fc_trace_clear debugfs file * @filp: The file pointer to read from. * @ubuf: The buffer to copy the data to. * @cnt: The number of bytes to read. * @ppos: The position in the file to start reading from. * * Description: - * This routine reads value of variable fnic_tracing_enabled - * and stores into local @buf. It will start reading file at @ppos and + * This routine reads the value of fnic_tracing_enabled, + * fnic_fc_tracing_enabled or fnic_fc_trace_cleared + * and stores it into local @buf. + * It will start reading the file at @ppos and * copy up to @cnt of data to @ubuf from @buf. * * Returns: @@ -114,13 +149,25 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp, { char buf[64]; int len; - len = sprintf(buf, "%u\n", fnic_tracing_enabled); + u8 *trace_type; + len = 0; + trace_type = (u8 *)filp->private_data; + if (*trace_type == fc_trc_flag->fnic_trace) + len = sprintf(buf, "%u\n", fnic_tracing_enabled); + else if (*trace_type == fc_trc_flag->fc_trace) + len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled); + else if (*trace_type == fc_trc_flag->fc_clear) + len = sprintf(buf, "%u\n", fnic_fc_trace_cleared); + else + pr_err("fnic: Cannot read from any debugfs file\n"); return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } /* - * fnic_trace_ctrl_write - Write to trace_enable debugfs file + * fnic_trace_ctrl_write - + * Write to the trace_enable, fc_trace_enable or + * fc_trace_clear debugfs file * @filp: The file pointer to write from. * @ubuf: The buffer to copy the data from. * @cnt: The number of bytes to write. * * Description: * This routine writes data from user buffer @ubuf to buffer @buf and - * sets fnic_tracing_enabled value as per user input. + * sets the fnic_tracing_enabled, fnic_fc_tracing_enabled or + * fnic_fc_trace_cleared value as per user input. * * Returns: * This function returns the amount of data that was written.
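[Editorial aside, not part of the patch.] The control files above all share one set of file_operations; each debugfs_create_file() call passes a pointer to a distinct flag byte as the inode's private data, and the open/read/write handlers dispatch on that flag. A minimal sketch of the pattern, with illustrative names (ctrl_open, ctrl_read and the flag values are not the driver's):

	static u8 flag_trace = 2, flag_fc_trace = 3;	/* illustrative selector bytes */

	static int ctrl_open(struct inode *inode, struct file *filp)
	{
		filp->private_data = inode->i_private;	/* remember which file this is */
		return 0;
	}

	static ssize_t ctrl_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
	{
		char buf[16];
		u8 sel = *(u8 *)filp->private_data;
		int len = sprintf(buf, "%u\n", sel);	/* real code dispatches on sel */

		return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
	}

	static const struct file_operations ctrl_fops = {
		.owner = THIS_MODULE,
		.open  = ctrl_open,
		.read  = ctrl_read,
	};

	/* at init time: one fops, several files; the private pointer selects behaviour */
	debugfs_create_file("tracing_enable", 0644, root, &flag_trace, &ctrl_fops);
	debugfs_create_file("fc_trace_enable", 0644, root, &flag_fc_trace, &ctrl_fops);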
@@ -140,6 +188,8 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp, char buf[64]; unsigned long val; int ret; + u8 *trace_type; + trace_type = (u8 *)filp->private_data; if (cnt >= sizeof(buf)) return -EINVAL; @@ -153,12 +203,27 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp, if (ret < 0) return ret; - fnic_tracing_enabled = val; + if (*trace_type == fc_trc_flag->fnic_trace) + fnic_tracing_enabled = val; + else if (*trace_type == fc_trc_flag->fc_trace) + fnic_fc_tracing_enabled = val; + else if (*trace_type == fc_trc_flag->fc_clear) + fnic_fc_trace_cleared = val; + else + pr_err("fnic: cannot write to any debugfs file\n"); + (*ppos)++; return cnt; } +static const struct file_operations fnic_trace_ctrl_fops = { + .owner = THIS_MODULE, + .open = fnic_trace_ctrl_open, + .read = fnic_trace_ctrl_read, + .write = fnic_trace_ctrl_write, +}; + /* * fnic_trace_debugfs_open - Open the fnic trace log * @inode: The inode pointer @@ -178,19 +243,36 @@ static int fnic_trace_debugfs_open(struct inode *inode, struct file *file) { fnic_dbgfs_t *fnic_dbg_prt; + u8 *rdata_ptr; + rdata_ptr = (u8 *)inode->i_private; fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL); if (!fnic_dbg_prt) return -ENOMEM; - fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE))); - if (!fnic_dbg_prt->buffer) { - kfree(fnic_dbg_prt); - return -ENOMEM; + if (*rdata_ptr == fc_trc_flag->fnic_trace) { + fnic_dbg_prt->buffer = vmalloc(3 * + (trace_max_pages * PAGE_SIZE)); + if (!fnic_dbg_prt->buffer) { + kfree(fnic_dbg_prt); + return -ENOMEM; + } + memset((void *)fnic_dbg_prt->buffer, 0, + 3 * (trace_max_pages * PAGE_SIZE)); + fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); + } else { + fnic_dbg_prt->buffer = + vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); + if (!fnic_dbg_prt->buffer) { + kfree(fnic_dbg_prt); + return -ENOMEM; + } + memset((void *)fnic_dbg_prt->buffer, 0, + 3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); + fnic_dbg_prt->buffer_len = + fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr); } - memset((void *)fnic_dbg_prt->buffer, 0, - (3*(trace_max_pages * PAGE_SIZE))); - fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); file->private_data = fnic_dbg_prt; + return 0; } @@ -272,13 +354,6 @@ static int fnic_trace_debugfs_release(struct inode *inode, return 0; } -static const struct file_operations fnic_trace_ctrl_fops = { - .owner = THIS_MODULE, - .open = fnic_trace_ctrl_open, - .read = fnic_trace_ctrl_read, - .write = fnic_trace_ctrl_write, -}; - static const struct file_operations fnic_trace_debugfs_fops = { .owner = THIS_MODULE, .open = fnic_trace_debugfs_open, @@ -306,9 +381,10 @@ int fnic_trace_debugfs_init(void) return rc; } fnic_trace_enable = debugfs_create_file("tracing_enable", - S_IFREG|S_IRUGO|S_IWUSR, - fnic_trace_debugfs_root, - NULL, &fnic_trace_ctrl_fops); + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fnic_trace), + &fnic_trace_ctrl_fops); if (!fnic_trace_enable) { printk(KERN_DEBUG @@ -317,10 +393,10 @@ int fnic_trace_debugfs_init(void) } fnic_trace_debugfs_file = debugfs_create_file("trace", - S_IFREG|S_IRUGO|S_IWUSR, - fnic_trace_debugfs_root, - NULL, - &fnic_trace_debugfs_fops); + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fnic_trace), + &fnic_trace_debugfs_fops); if (!fnic_trace_debugfs_file) { printk(KERN_DEBUG @@ -340,14 +416,104 @@ int fnic_trace_debugfs_init(void) */ void fnic_trace_debugfs_terminate(void) { - if (fnic_trace_debugfs_file) { - debugfs_remove(fnic_trace_debugfs_file); -
fnic_trace_debugfs_file = NULL; + debugfs_remove(fnic_trace_debugfs_file); + fnic_trace_debugfs_file = NULL; + + debugfs_remove(fnic_trace_enable); + fnic_trace_enable = NULL; +} + +/* + * fnic_fc_trace_debugfs_init - + * Initialize debugfs for fnic control frame trace logging + * + * Description: + * When Debugfs is configured this routine sets up the fnic_fc debugfs + * file system. If not already created, this routine will create the + * file fc_trace to log the fnic fc trace buffer output into debugfs, and + * it will also create the file fc_trace_enable to control enable/disable of + * trace logging into the trace buffer. + */ + +int fnic_fc_trace_debugfs_init(void) +{ + int rc = -1; + + if (!fnic_trace_debugfs_root) { + pr_err("fnic: Debugfs root directory doesn't exist\n"); + return rc; + } + + fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_trace), + &fnic_trace_ctrl_fops); + + if (!fnic_fc_trace_enable) { + pr_err("fnic: Failed to create fc_trace_enable file\n"); + return rc; + } + + fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_clear), + &fnic_trace_ctrl_fops); + + if (!fnic_fc_trace_clear) { + pr_err("fnic: Failed to create fc_trace_clear file\n"); + return rc; + } + + fnic_fc_rdata_trace_debugfs_file = + debugfs_create_file("fc_trace_rdata", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_normal_file), + &fnic_trace_debugfs_fops); + + if (!fnic_fc_rdata_trace_debugfs_file) { + pr_err("fnic: Failed to create fc_trace_rdata file\n"); + return rc; } - if (fnic_trace_enable) { - debugfs_remove(fnic_trace_enable); - fnic_trace_enable = NULL; + + fnic_fc_trace_debugfs_file = + debugfs_create_file("fc_trace", + S_IFREG|S_IRUGO|S_IWUSR, + fnic_trace_debugfs_root, + &(fc_trc_flag->fc_row_file), + &fnic_trace_debugfs_fops); + + if (!fnic_fc_trace_debugfs_file) { + pr_err("fnic: Failed to create fc_trace file\n"); + return rc; } + rc = 0; + return rc; +} + +/* + * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure + * + * Description: + * When Debugfs is configured this routine removes debugfs file system + * elements that are specific to fnic_fc trace logging.
+ */ + +void fnic_fc_trace_debugfs_terminate(void) +{ + debugfs_remove(fnic_fc_trace_debugfs_file); + fnic_fc_trace_debugfs_file = NULL; + + debugfs_remove(fnic_fc_rdata_trace_debugfs_file); + fnic_fc_rdata_trace_debugfs_file = NULL; + + debugfs_remove(fnic_fc_trace_enable); + fnic_fc_trace_enable = NULL; + + debugfs_remove(fnic_fc_trace_clear); + fnic_fc_trace_clear = NULL; } /* diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 1671325aec7f..1b948f633fc5 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -66,19 +66,35 @@ void fnic_handle_link(struct work_struct *work) fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); if (old_link_status == fnic->link_status) { - if (!fnic->link_status) + if (!fnic->link_status) { /* DOWN -> DOWN */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); - else { + fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_LE, "Link Status: DOWN->DOWN", + strlen("Link Status: DOWN->DOWN")); + } else { if (old_link_down_cnt != fnic->link_down_cnt) { /* UP -> DOWN -> UP */ fnic->lport->host_stats.link_failure_count++; spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, + "Link Status: UP_DOWN_UP", + strlen("Link Status: UP_DOWN_UP") + ); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); fcoe_ctlr_link_down(&fnic->ctlr); if (fnic->config.flags & VFCF_FIP_CAPABLE) { /* start FCoE VLAN discovery */ + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, + "Link Status: UP_DOWN_UP_VLAN", + strlen( + "Link Status: UP_DOWN_UP_VLAN") + ); fnic_fcoe_send_vlan_req(fnic); return; } @@ -88,22 +104,36 @@ void fnic_handle_link(struct work_struct *work) } else /* UP -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: UP_UP", + strlen("Link Status: UP_UP")); } } else if (fnic->link_status) { /* DOWN -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (fnic->config.flags & VFCF_FIP_CAPABLE) { /* start FCoE VLAN discovery */ + fnic_fc_trace_set_data( + fnic->lport->host->host_no, + FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", + strlen("Link Status: DOWN_UP_VLAN")); fnic_fcoe_send_vlan_req(fnic); return; } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); + fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); fcoe_ctlr_link_up(&fnic->ctlr); } else { /* UP -> DOWN */ fnic->lport->host_stats.link_failure_count++; spin_unlock_irqrestore(&fnic->fnic_lock, flags); FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); + fnic_fc_trace_set_data( + fnic->lport->host->host_no, FNIC_FC_LE, + "Link Status: UP_DOWN", + strlen("Link Status: UP_DOWN")); fcoe_ctlr_link_down(&fnic->ctlr); } @@ -267,11 +297,6 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, if (desc->fip_dtype == FIP_DT_FLOGI) { - shost_printk(KERN_DEBUG, lport->host, - " FIP TYPE FLOGI: fab name:%llx " - "vfid:%d map:%x\n", - fip->sel_fcf->fabric_name, fip->sel_fcf->vfid, - fip->sel_fcf->fc_map); if (dlen < sizeof(*els) + sizeof(*fh) + 1) return 0; @@ -616,6 +641,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) "using UCSM\n"); goto drop; } + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + }
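	/*
	 * Editorial aside, not part of the patch: the 0x80 OR'd into the
	 * frame type here sets the high bit that fnic_trace.h documents as
	 * the frame encoding ("1 => Eth frame, 0 => FC frame"), so FIP
	 * frames, which are still Ethernet-encapsulated at this point, are
	 * logged as FNIC_FC_RECV|0x80, while a raw FC frame would use the
	 * bare type, e.g. (host_no and frame_len assumed from context):
	 *
	 *	fnic_fc_trace_set_data(host_no, FNIC_FC_SEND,
	 *			       (char *)fc_frame_header_get(fp),
	 *			       frame_len);
	 *
	 * copy_and_format_trace_data() masks the bit off again
	 * (frame_type & 0x7F) before rendering the 'R'/'T'/'L' type char.
	 */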
skb_queue_tail(&fnic->fip_frame_queue, skb); queue_work(fnic_fip_queue, &fnic->fip_frame_work); return 1; /* let caller know packet was used */ @@ -844,6 +873,10 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc } fr_dev(fp) = fnic->lport; spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, + (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } skb_queue_tail(&fnic->frame_queue, skb); queue_work(fnic_event_queue, &fnic->frame_work); @@ -951,6 +984,15 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } + } else { + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, + FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } } pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); @@ -1023,6 +1065,11 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); + if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, + (char *)eth_hdr, tot_len)) != 0) { + printk(KERN_ERR "fnic ctlr frame trace error!!!"); + } + spin_lock_irqsave(&fnic->wq_lock[0], flags); if (!vnic_wq_desc_avail(wq)) { diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 33e4ec2bfe73..8c56fdc3a456 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -74,6 +74,11 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " "for fnic trace buffer"); +unsigned int fnic_fc_trace_max_pages = 64; +module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_fc_trace_max_pages, + "Total allocated memory pages for fc trace buffer"); + static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); @@ -111,7 +116,7 @@ static struct scsi_host_template fnic_host_template = { .change_queue_type = fc_change_queue_type, .this_id = -1, .cmd_per_lun = 3, - .can_queue = FNIC_MAX_IO_REQ, + .can_queue = FNIC_DFLT_IO_REQ, .use_clustering = ENABLE_CLUSTERING, .sg_tablesize = FNIC_MAX_SG_DESC_CNT, .max_sectors = 0xffff, @@ -773,6 +778,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); + fnic->ctlr.state = FIP_ST_NON_FIP; } fnic->state = FNIC_IN_FC_MODE; @@ -1033,11 +1039,20 @@ static int __init fnic_init_module(void) /* Allocate memory for trace buffer */ err = fnic_trace_buf_init(); if (err < 0) { - printk(KERN_ERR PFX "Trace buffer initialization Failed " - "Fnic Tracing utility is disabled\n"); + printk(KERN_ERR PFX + "Trace buffer initialization Failed. 
" + "Fnic Tracing utility is disabled\n"); fnic_trace_free(); } + /* Allocate memory for fc trace buffer */ + err = fnic_fc_trace_init(); + if (err < 0) { + printk(KERN_ERR PFX "FC trace buffer initialization Failed " + "FC frame tracing utility is disabled\n"); + fnic_fc_trace_free(); + } + /* Create a cache for allocation of default size sgls */ len = sizeof(struct fnic_dflt_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create @@ -1118,6 +1133,7 @@ err_create_fnic_sgl_slab_max: kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); err_create_fnic_sgl_slab_dflt: fnic_trace_free(); + fnic_fc_trace_free(); fnic_debugfs_terminate(); return err; } @@ -1135,6 +1151,7 @@ static void __exit fnic_cleanup_module(void) kmem_cache_destroy(fnic_io_req_cache); fc_release_transport(fnic_fc_transport); fnic_trace_free(); + fnic_fc_trace_free(); fnic_debugfs_terminate(); } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 0521436d05d6..ea28b5ca4c73 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -1312,8 +1312,9 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) cleanup_scsi_cmd: sc->result = DID_TRANSPORT_DISRUPTED << 16; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" - " DID_TRANSPORT_DISRUPTED\n"); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, + "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n", + __func__, (jiffies - start_time)); if (atomic64_read(&fnic->io_cmpl_skip)) atomic64_dec(&fnic->io_cmpl_skip); @@ -1733,6 +1734,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) struct fnic_stats *fnic_stats; struct abort_stats *abts_stats; struct terminate_stats *term_stats; + enum fnic_ioreq_state old_ioreq_state; int tag; DECLARE_COMPLETION_ONSTACK(tm_done); @@ -1793,6 +1795,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) * the completion wont be done till mid-layer, since abort * has already started. 
*/ + old_ioreq_state = CMD_STATE(sc); CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; @@ -1816,6 +1819,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req, fc_lun.scsi_lun, io_req)) { spin_lock_irqsave(io_lock, flags); + if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) + CMD_STATE(sc) = old_ioreq_state; io_req = (struct fnic_io_req *)CMD_SP(sc); if (io_req) io_req->abts_done = NULL; @@ -1859,12 +1864,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { spin_unlock_irqrestore(io_lock, flags); if (task_req == FCPIO_ITMF_ABT_TASK) { - FNIC_SCSI_DBG(KERN_INFO, - fnic->lport->host, "Abort Driver Timeout\n"); atomic64_inc(&abts_stats->abort_drv_timeouts); } else { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, - "Terminate Driver Timeout\n"); atomic64_inc(&term_stats->terminate_drv_timeouts); } CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index e002e7187dc0..c77285926827 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -20,6 +20,7 @@ #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> +#include <linux/time.h> #include "fnic_io.h" #include "fnic.h" @@ -32,6 +33,16 @@ static DEFINE_SPINLOCK(fnic_trace_lock); static fnic_trace_dbg_t fnic_trace_entries; int fnic_tracing_enabled = 1; +/* static char *fnic_fc_ctlr_trace_buf_p; */ + +static int fc_trace_max_entries; +static unsigned long fnic_fc_ctlr_trace_buf_p; +static fnic_trace_dbg_t fc_trace_entries; +int fnic_fc_tracing_enabled = 1; +int fnic_fc_trace_cleared = 1; +static DEFINE_SPINLOCK(fnic_fc_trace_lock); + + /* * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information * @@ -428,10 +439,10 @@ int fnic_trace_buf_init(void) } err = fnic_trace_debugfs_init(); if (err < 0) { - printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n"); + pr_err("fnic: Failed to initialize debugfs for tracing\n"); goto err_fnic_trace_debugfs_init; } - printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n"); + pr_info("fnic: Successfully Initialized Trace Buffer\n"); return err; err_fnic_trace_debugfs_init: fnic_trace_free(); @@ -456,3 +467,314 @@ void fnic_trace_free(void) } printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n"); } + +/* + * fnic_fc_trace_init - + * Initialize trace buffer to log fnic control frames + * Description: + * Initialize trace buffer data structure by allocating + * required memory for trace data as well as for indexes. + * Frame size is 256 bytes and + * memory is allocated for 1024 entries of 256 bytes. + * Page_offset(Index) is set to the address of trace entry + * and page_offset is initialized by adding frame size + * to the previous page_offset entry. + */
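[Editorial note.] To make the sizing above concrete: with the module default fnic_fc_trace_max_pages = 64 and 4 KiB pages, the division done below yields 64 * 4096 / 256 = 1024 trace entries, which is where the "1024 entries of 256 bytes" in the comment comes from:

	fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE) /
				FC_TRC_SIZE_BYTES;	/* 64 * 4096 / 256 = 1024 */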
+ +int fnic_fc_trace_init(void) +{ + unsigned long fc_trace_buf_head; + int err = 0; + int i; + + fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/ + FC_TRC_SIZE_BYTES; + fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc( + fnic_fc_trace_max_pages * PAGE_SIZE); + if (!fnic_fc_ctlr_trace_buf_p) { + pr_err("fnic: Failed to allocate memory for " + "FC Control Trace Buf\n"); + err = -ENOMEM; + goto err_fnic_fc_ctlr_trace_buf_init; + } + + memset((void *)fnic_fc_ctlr_trace_buf_p, 0, + fnic_fc_trace_max_pages * PAGE_SIZE); + + /* Allocate memory for page offset */ + fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries * + sizeof(unsigned long)); + if (!fc_trace_entries.page_offset) { + pr_err("fnic: Failed to allocate memory for page_offset\n"); + if (fnic_fc_ctlr_trace_buf_p) { + pr_err("fnic: Freeing FC Control Trace Buf\n"); + vfree((void *)fnic_fc_ctlr_trace_buf_p); + fnic_fc_ctlr_trace_buf_p = 0; + } + err = -ENOMEM; + goto err_fnic_fc_ctlr_trace_buf_init; + } + memset((void *)fc_trace_entries.page_offset, 0, + (fc_trace_max_entries * sizeof(unsigned long))); + + fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; + fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p; + + /* + * Set up fc_trace_entries.page_offset field with memory location + * for every trace entry + */ + for (i = 0; i < fc_trace_max_entries; i++) { + fc_trace_entries.page_offset[i] = fc_trace_buf_head; + fc_trace_buf_head += FC_TRC_SIZE_BYTES; + } + err = fnic_fc_trace_debugfs_init(); + if (err < 0) { + pr_err("fnic: Failed to initialize FC_CTLR tracing.\n"); + goto err_fnic_fc_ctlr_trace_debugfs_init; + } + pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n"); + return err; + +err_fnic_fc_ctlr_trace_debugfs_init: + fnic_fc_trace_free(); +err_fnic_fc_ctlr_trace_buf_init: + return err; +} + +/* + * fnic_fc_trace_free - Free memory of fnic_fc_ctlr trace data structures. + */ +void fnic_fc_trace_free(void) +{ + fnic_fc_tracing_enabled = 0; + fnic_fc_trace_debugfs_terminate(); + if (fc_trace_entries.page_offset) { + vfree((void *)fc_trace_entries.page_offset); + fc_trace_entries.page_offset = NULL; + } + if (fnic_fc_ctlr_trace_buf_p) { + vfree((void *)fnic_fc_ctlr_trace_buf_p); + fnic_fc_ctlr_trace_buf_p = 0; + } + pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n"); +} + +/* + * fnic_fc_trace_set_data: + * Maintain rd & wr idx accordingly and set data + * Passed parameters: + * host_no: host number associated with fnic + * frame_type: send_frame, recv_frame or link event + * fc_frame: pointer to fc_frame + * frame_len: Length of the fc_frame + * Description: + * This routine will get the next available wr_idx and + * copy all passed trace data to the buffer pointed to by wr_idx + * and increment wr_idx. It will also make sure that we don't + * overwrite the entry which we are reading, and also + * wrap around if we reach the maximum entries. + * Return Value: + * It will return 0 for success or -1 for failure + */
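[Editorial aside.] The index discipline described above is an overwrite-oldest ring buffer: the writer claims the next slot modulo the capacity, and if it lands on the reader's slot it pushes the reader forward one entry, so the oldest record is dropped rather than the newest. The bare arithmetic, as a sketch (the function below does the same thing with explicit wrap tests instead of %):

	wr_idx = (wr_idx + 1) % max_entries;		/* claim the next slot */
	if (wr_idx == rd_idx)				/* writer caught the reader, */
		rd_idx = (rd_idx + 1) % max_entries;	/* so drop the oldest entry */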
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, + char *frame, u32 fc_trc_frame_len) +{ + unsigned long flags; + struct fc_trace_hdr *fc_buf; + unsigned long eth_fcoe_hdr_len; + char *fc_trace; + + if (fnic_fc_tracing_enabled == 0) + return 0; + + spin_lock_irqsave(&fnic_fc_trace_lock, flags); + + if (fnic_fc_trace_cleared == 1) { + fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; + pr_info("fnic: Resetting the read idx\n"); + memset((void *)fnic_fc_ctlr_trace_buf_p, 0, + fnic_fc_trace_max_pages * PAGE_SIZE); + fnic_fc_trace_cleared = 0; + } + + fc_buf = (struct fc_trace_hdr *) + fc_trace_entries.page_offset[fc_trace_entries.wr_idx]; + + fc_trace_entries.wr_idx++; + + if (fc_trace_entries.wr_idx >= fc_trace_max_entries) + fc_trace_entries.wr_idx = 0; + + if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { + fc_trace_entries.rd_idx++; + if (fc_trace_entries.rd_idx >= fc_trace_max_entries) + fc_trace_entries.rd_idx = 0; + } + + fc_buf->time_stamp = CURRENT_TIME; + fc_buf->host_no = host_no; + fc_buf->frame_type = frame_type; + + fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf); + + /* During the receive path, we do not have eth hdr as well as fcoe hdr + * at trace entry point so we will stuff 0xff just to make it generic. + */ + if (frame_type == FNIC_FC_RECV) { + eth_fcoe_hdr_len = sizeof(struct ethhdr) + + sizeof(struct fcoe_hdr); + fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len; + memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); + /* Copy the rest of data frame */ + memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, + min_t(u8, fc_trc_frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); + } else { + memcpy((char *)fc_trace, (void *)frame, + min_t(u8, fc_trc_frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); + } + + /* Store the actual received length */ + fc_buf->frame_len = fc_trc_frame_len; + + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return 0; +} + +/* + * fnic_fc_trace_get_data: Copy trace buffer to a memory file + * Passed parameter: + * @fnic_dbgfs_t: pointer to debugfs trace buffer + * rdata_flag: 1 => Unformatted file + * 0 => Formatted file + * Description: + * This routine will copy the trace data to a memory file with + * proper formatting, and also copy it to another memory + * file without formatting for further processing. + * Return Value: + * Number of bytes that were dumped into fnic_dbgfs_t + */
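[Editorial aside.] Both FC-trace debugfs readers funnel into this one helper; only the flag differs. An illustrative pair of calls, with fnic_dbg_prt assumed allocated as in fnic_trace_debugfs_open() above:

	/* "fc_trace": table output formatted through copy_and_format_trace_data() */
	len = fnic_fc_trace_get_data(fnic_dbg_prt, 0);

	/* "fc_trace_rdata": each 256-byte entry dumped as raw hex */
	len = fnic_fc_trace_get_data(fnic_dbg_prt, 1);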
+ +int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag) +{ + int rd_idx, wr_idx; + unsigned long flags; + int len = 0, j; + struct fc_trace_hdr *tdata; + char *fc_trace; + + spin_lock_irqsave(&fnic_fc_trace_lock, flags); + if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + pr_info("fnic: Buffer is empty\n"); + return 0; + } + rd_idx = fc_trace_entries.rd_idx; + wr_idx = fc_trace_entries.wr_idx; + if (rdata_flag == 0) { + len += snprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + "Time Stamp (UTC)\t\t" + "Host No: F Type: len: FCoE_FRAME:\n"); + } + + while (rd_idx != wr_idx) { + tdata = (struct fc_trace_hdr *) + fc_trace_entries.page_offset[rd_idx]; + if (!tdata) { + pr_info("fnic: Rd data is NULL\n"); + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return 0; + } + if (rdata_flag == 0) { + copy_and_format_trace_data(tdata, + fnic_dbgfs_prt, &len, rdata_flag); + } else { + fc_trace = (char *)tdata; + for (j = 0; j < FC_TRC_SIZE_BYTES; j++) { + len += snprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) + - len, "%02x", fc_trace[j] & 0xff); + } /* for loop */ + len += snprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + "\n"); + } + rd_idx++; + if (rd_idx > (fc_trace_max_entries - 1)) + rd_idx = 0; + } + + spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); + return len; +} + +/* + * copy_and_format_trace_data: Copy formatted data to char * buffer + * Passed Parameter: + * @fc_trace_hdr_t: pointer to trace data + * @fnic_dbgfs_t: pointer to debugfs trace buffer + * @orig_len: pointer to len + * rdata_flag: 0 => Formatted file, 1 => Unformatted file + * Description: + * This routine will format and copy the passed trace data + * for the formatted or unformatted file accordingly. + */
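[Editorial aside.] The formatter below leans on time_to_tm() to break the stored timespec into a UTC wall-clock date; a self-contained sketch of that call, where ts is an assumed struct timespec:

	struct tm tm;

	time_to_tm(ts.tv_sec, 0, &tm);		/* offset 0 => UTC */
	/* tm_year counts from 1900 and tm_mon is zero-based, hence the +1 */
	pr_info("%02d/%02d/%04ld %02d:%02d:%02d.%09lu\n",
		tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);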
+ +void copy_and_format_trace_data(struct fc_trace_hdr *tdata, + fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len, + u8 rdata_flag) +{ + struct tm tm; + int j, i = 1, len; + char *fc_trace, *fmt; + int ethhdr_len = sizeof(struct ethhdr) - 1; + int fcoehdr_len = sizeof(struct fcoe_hdr); + int fchdr_len = sizeof(struct fc_frame_header); + int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3; + + tdata->frame_type = tdata->frame_type & 0x7F; + + len = *orig_len; + + time_to_tm(tdata->time_stamp.tv_sec, 0, &tm); + + fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t"; + len += snprintf(fnic_dbgfs_prt->buffer + len, + (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, + fmt, + tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900, + tm.tm_hour, tm.tm_min, tm.tm_sec, + tdata->time_stamp.tv_nsec, tdata->host_no, + tdata->frame_type, tdata->frame_len); + + fc_trace = (char *)FC_TRACE_ADDRESS(tdata); + + for (j = 0; j < min_t(u8, tdata->frame_len, + (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) { + if (tdata->frame_type == FNIC_FC_LE) { + len += snprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "%c", fc_trace[j]); + } else { + len += snprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "%02x", fc_trace[j] & 0xff); + len += snprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, " "); + if (j == ethhdr_len || + j == ethhdr_len + fcoehdr_len || + j == ethhdr_len + fcoehdr_len + fchdr_len || + (i > 3 && j%fchdr_len == 0)) { + len += snprintf(fnic_dbgfs_prt->buffer + + len, (fnic_fc_trace_max_pages + * PAGE_SIZE * 3) - len, + "\n\t\t\t\t\t\t\t\t"); + i++; + } + } /* end of else*/ + } /* End of for loop*/ + len += snprintf(fnic_dbgfs_prt->buffer + len, + max_size - len, "\n"); + *orig_len = len; +} diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h index d412f2ee3c4f..a8aa0578fcb0 100644 --- a/drivers/scsi/fnic/fnic_trace.h +++ b/drivers/scsi/fnic/fnic_trace.h @@ -19,6 +19,17 @@ #define __FNIC_TRACE_H__ #define FNIC_ENTRY_SIZE_BYTES 64 +#define FC_TRC_SIZE_BYTES 256 +#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr) + +/* + * First bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type + * of frame: 1 => Eth frame, 0 => FC frame + */ + +#define FNIC_FC_RECV 0x52 /* Character R */ +#define FNIC_FC_SEND 0x54 /* Character T */ +#define FNIC_FC_LE 0x4C /* Character L */ extern ssize_t simple_read_from_buffer(void __user *to, size_t count, @@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages; extern int fnic_tracing_enabled; extern unsigned int trace_max_pages; +extern unsigned int fnic_fc_trace_max_pages; +extern int fnic_fc_tracing_enabled; +extern int fnic_fc_trace_cleared; + typedef struct fnic_trace_dbg { int wr_idx; int rd_idx; @@ -56,6 +71,16 @@ struct fnic_trace_data { typedef struct fnic_trace_data fnic_trace_data_t; +struct fc_trace_hdr { + struct timespec time_stamp; + u32 host_no; + u8 frame_type; + u8 frame_len; +} __attribute__((__packed__)); + +#define FC_TRACE_ADDRESS(a) \ + ((unsigned long)(a) + sizeof(struct fc_trace_hdr)) + #define FNIC_TRACE_ENTRY_SIZE \ (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t)) @@ -88,4 +113,17 @@ int fnic_debugfs_init(void); void fnic_debugfs_terminate(void); int fnic_trace_debugfs_init(void); void fnic_trace_debugfs_terminate(void); + +/* Fnic FC CTLR Trace related functions */ +int fnic_fc_trace_init(void); +void fnic_fc_trace_free(void); +int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, + char *frame, u32 fc_frame_len); +int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8
rdata_flag); +void copy_and_format_trace_data(struct fc_trace_hdr *tdata, + fnic_dbgfs_t *fnic_dbgfs_prt, + int *len, u8 rdata_flag); +int fnic_fc_trace_debugfs_init(void); +void fnic_fc_trace_debugfs_terminate(void); + #endif diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 7176365e916b..a1bc8ca958e1 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -78,10 +78,6 @@ * */ -/* - * $Log: generic_NCR5380.c,v $ - */ - /* settings for DTC3181E card with only Mustek scanner attached */ #define USLEEP #define USLEEP_POLL 1 diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 1bcdb7beb77b..703adf78e0b2 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -25,10 +25,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: generic_NCR5380.h,v $ - */ - #ifndef GENERIC_NCR5380_H #define GENERIC_NCR5380_H @@ -58,8 +54,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *); #define CAN_QUEUE 16 #endif -#ifndef HOSTS_C - #define __STRVAL(x) #x #define STRVAL(x) __STRVAL(x) @@ -131,7 +125,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *); #define BOARD_NCR53C400A 2 #define BOARD_DTC3181E 3 -#endif /* else def HOSTS_C */ #endif /* ndef ASM */ #endif /* GENERIC_NCR5380_H */ diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 9a6e4a2cd072..31184b35370f 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -48,6 +48,7 @@ #include <linux/bitmap.h> #include <linux/atomic.h> #include <linux/jiffies.h> +#include <linux/percpu.h> #include <asm/div64.h> #include "hpsa_cmd.h" #include "hpsa.h" @@ -115,9 +116,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, @@ -165,9 +172,15 @@ static struct board_type products[] = { {0x21C3103C, "Smart Array", &SA5_access}, {0x21C4103C, "Smart Array", &SA5_access}, {0x21C5103C, "Smart Array", &SA5_access}, + {0x21C6103C, "Smart Array", &SA5_access}, {0x21C7103C, "Smart Array", &SA5_access}, {0x21C8103C, "Smart Array", &SA5_access}, {0x21C9103C, "Smart Array", &SA5_access}, + {0x21CA103C, "Smart Array", &SA5_access}, + {0x21CB103C, "Smart Array", &SA5_access}, + {0x21CC103C, "Smart Array", &SA5_access}, + {0x21CD103C, "Smart Array", &SA5_access}, + {0x21CE103C, "Smart Array", &SA5_access}, {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, @@ -181,7 +194,8 @@ static int number_of_controllers; static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id); static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id); static int hpsa_ioctl(struct scsi_device *dev, int 
cmd, void *arg); -static void start_io(struct ctlr_info *h); +static void lock_and_start_io(struct ctlr_info *h); +static void start_io(struct ctlr_info *h, unsigned long *flags); #ifdef CONFIG_COMPAT static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); @@ -683,7 +697,7 @@ static inline void addQ(struct list_head *list, struct CommandList *c) static inline u32 next_command(struct ctlr_info *h, u8 q) { u32 a; - struct reply_pool *rq = &h->reply_queue[q]; + struct reply_queue_buffer *rq = &h->reply_queue[q]; unsigned long flags; if (h->transMethod & CFGTBL_Trans_io_accel1) @@ -832,8 +846,8 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h, spin_lock_irqsave(&h->lock, flags); addQ(&h->reqQ, c); h->Qdepth++; + start_io(h, &flags); spin_unlock_irqrestore(&h->lock, flags); - start_io(h); } static inline void removeQ(struct CommandList *c) @@ -1542,9 +1556,13 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, dev_warn(&h->pdev->dev, "%s: task complete with check condition.\n", "HP SSD Smart Path"); + cmd->result |= SAM_STAT_CHECK_CONDITION; if (c2->error_data.data_present != - IOACCEL2_SENSE_DATA_PRESENT) + IOACCEL2_SENSE_DATA_PRESENT) { + memset(cmd->sense_buffer, 0, + SCSI_SENSE_BUFFERSIZE); break; + } /* copy the sense data */ data_len = c2->error_data.sense_data_len; if (data_len > SCSI_SENSE_BUFFERSIZE) @@ -1554,7 +1572,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h, sizeof(c2->error_data.sense_data_buff); memcpy(cmd->sense_buffer, c2->error_data.sense_data_buff, data_len); - cmd->result |= SAM_STAT_CHECK_CONDITION; retry = 1; break; case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: @@ -1639,16 +1656,6 @@ static void process_ioaccel2_completion(struct ctlr_info *h, if (is_logical_dev_addr_mode(dev->scsi3addr) && c2->error_data.serv_response == IOACCEL2_SERV_RESPONSE_FAILURE) { - if (c2->error_data.status == - IOACCEL2_STATUS_SR_IOACCEL_DISABLED) - dev_warn(&h->pdev->dev, - "%s: Path is unavailable, retrying on standard path.\n", - "HP SSD Smart Path"); - else - dev_warn(&h->pdev->dev, - "%s: Error 0x%02x, retrying on standard path.\n", - "HP SSD Smart Path", c2->error_data.status); - dev->offload_enabled = 0; h->drv_req_rescan = 1; /* schedule controller for a rescan */ cmd->result = DID_SOFT_ERROR << 16; @@ -1979,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, wait_for_completion(&wait); } +static u32 lockup_detected(struct ctlr_info *h) +{ + int cpu; + u32 rc, *lockup_detected; + + cpu = get_cpu(); + lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); + rc = *lockup_detected; + put_cpu(); + return rc; +} + static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h, struct CommandList *c) { - unsigned long flags; - /* If controller lockup detected, fake a hardware error. 
*/ - spin_lock_irqsave(&h->lock, flags); - if (unlikely(h->lockup_detected)) { - spin_unlock_irqrestore(&h->lock, flags); + if (unlikely(lockup_detected(h))) c->err_info->CommandStatus = CMD_HARDWARE_ERR; - } else { - spin_unlock_irqrestore(&h->lock, flags); + else hpsa_scsi_do_simple_cmd_core(h, c); - } } #define MAX_DRIVER_CMD_RETRIES 25 @@ -2417,7 +2430,7 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, buflen = 16; buf = kzalloc(64, GFP_KERNEL); if (!buf) - return -1; + return -ENOMEM; rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); if (rc == 0) memcpy(device_id, &buf[8], buflen); @@ -2503,27 +2516,21 @@ static int hpsa_get_volume_status(struct ctlr_info *h, return HPSA_VPD_LV_STATUS_UNSUPPORTED; /* Does controller have VPD for logical volume status? */ - if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) { - dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n"); + if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) goto exit_failed; - } /* Get the size of the VPD return buffer */ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, buf, HPSA_VPD_HEADER_SZ); - if (rc != 0) { - dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n"); + if (rc != 0) goto exit_failed; - } size = buf[3]; /* Now get the whole VPD buffer */ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, buf, size + HPSA_VPD_HEADER_SZ); - if (rc != 0) { - dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n"); + if (rc != 0) goto exit_failed; - } status = buf[4]; /* status byte */ kfree(buf); @@ -2536,11 +2543,11 @@ exit_failed: /* Determine offline status of a volume. * Return either: * 0 (not offline) - * -1 (offline for unknown reasons) + * 0xff (offline for unknown reasons) * # (integer code indicating one of several NOT READY states * describing why a volume is to be kept offline) */ -static unsigned char hpsa_volume_offline(struct ctlr_info *h, +static int hpsa_volume_offline(struct ctlr_info *h, unsigned char scsi3addr[]) { struct CommandList *c; @@ -2639,11 +2646,15 @@ static int hpsa_update_device_info(struct ctlr_info *h, if (this_device->devtype == TYPE_DISK && is_logical_dev_addr_mode(scsi3addr)) { + int volume_offline; + hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) hpsa_get_ioaccel_status(h, scsi3addr, this_device); - this_device->volume_offline = - hpsa_volume_offline(h, scsi3addr); + volume_offline = hpsa_volume_offline(h, scsi3addr); + if (volume_offline < 0 || volume_offline > 0xff) + volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; + this_device->volume_offline = volume_offline & 0xff; } else { this_device->raid_level = RAID_UNKNOWN; this_device->offload_config = 0; @@ -2836,6 +2847,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, /* Get the list of physical devices */ physicals = kzalloc(reportsize, GFP_KERNEL); + if (physicals == NULL) + return 0; if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, reportsize, extended)) { dev_err(&h->pdev->dev, @@ -2847,26 +2860,20 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / responsesize; - /* find ioaccel2 handle in list of physicals: */ for (i = 0; i < nphysicals; i++) { + struct ext_report_lun_entry *entry = &physicals->LUN[i]; + /* handle is in bytes 28-31 of each lun */ - if (memcmp(&((struct ReportExtendedLUNdata *) - 
physicals)->LUN[i][20], &find, 4) != 0) { + if (entry->ioaccel_handle != find) continue; /* didn't match */ - } found = 1; - memcpy(scsi3addr, &((struct ReportExtendedLUNdata *) - physicals)->LUN[i][0], 8); + memcpy(scsi3addr, entry->lunid, 8); if (h->raid_offload_debug > 0) dev_info(&h->pdev->dev, - "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n", __func__, find, - ((struct ReportExtendedLUNdata *) - physicals)->LUN[i][20], - scsi3addr[0], scsi3addr[1], scsi3addr[2], - scsi3addr[3], scsi3addr[4], scsi3addr[5], - scsi3addr[6], scsi3addr[7]); + entry->ioaccel_handle, scsi3addr); break; /* found it */ } @@ -2951,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, return RAID_CTLR_LUNID; if (i < logicals_start) - return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; + return &physdev_list->LUN[i - + (raid_ctlr_position == 0)].lunid[0]; if (i < last_device) return &logdev_list->LUN[i - nphysicals - @@ -2963,19 +2971,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, static int hpsa_hba_mode_enabled(struct ctlr_info *h) { int rc; + int hba_mode_enabled; struct bmic_controller_parameters *ctlr_params; ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), GFP_KERNEL); if (!ctlr_params) - return 0; + return -ENOMEM; rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, sizeof(struct bmic_controller_parameters)); - if (rc != 0) { + if (rc) { kfree(ctlr_params); - return 0; + return rc; } - return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0; + + hba_mode_enabled = + ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0); + kfree(ctlr_params); + return hba_mode_enabled; } static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) @@ -3001,7 +3014,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24; int i, n_ext_target_devs, ndevs_to_allocate; int raid_ctlr_position; - u8 rescan_hba_mode; + int rescan_hba_mode; DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); @@ -3016,6 +3029,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) memset(lunzerobits, 0, sizeof(lunzerobits)); rescan_hba_mode = hpsa_hba_mode_enabled(h); + if (rescan_hba_mode < 0) + goto out; if (!h->hba_mode_enabled && rescan_hba_mode) dev_warn(&h->pdev->dev, "HBA mode enabled\n"); @@ -3053,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) ndev_allocated++; } - if (unlikely(is_scsi_rev_5(h))) + if (is_scsi_rev_5(h)) raid_ctlr_position = 0; else raid_ctlr_position = nphysicals + nlogicals; @@ -3950,7 +3965,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, struct hpsa_scsi_dev_t *dev; unsigned char scsi3addr[8]; struct CommandList *c; - unsigned long flags; int rc = 0; /* Get the ptr to our adapter structure out of cmd->host. */ @@ -3963,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, } memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); - spin_lock_irqsave(&h->lock, flags); - if (unlikely(h->lockup_detected)) { - spin_unlock_irqrestore(&h->lock, flags); + if (unlikely(lockup_detected(h))) { cmd->result = DID_ERROR << 16; done(cmd); return 0; } - spin_unlock_irqrestore(&h->lock, flags); c = cmd_alloc(h); if (c == NULL) { /* trouble... 
*/ dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); @@ -4082,16 +4093,13 @@ static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) * we can prevent new rescan threads from piling up on a * locked up controller. */ - spin_lock_irqsave(&h->lock, flags); - if (unlikely(h->lockup_detected)) { - spin_unlock_irqrestore(&h->lock, flags); + if (unlikely(lockup_detected(h))) { spin_lock_irqsave(&h->scan_lock, flags); h->scan_finished = 1; wake_up_all(&h->scan_wait_queue); spin_unlock_irqrestore(&h->scan_lock, flags); return 1; } - spin_unlock_irqrestore(&h->lock, flags); return 0; } @@ -4942,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) return -EFAULT; - if (iocommand.Request.Type.Direction == XFER_WRITE) { + if (iocommand.Request.Type.Direction & XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { @@ -5005,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) rc = -EFAULT; goto out; } - if (iocommand.Request.Type.Direction == XFER_READ && + if ((iocommand.Request.Type.Direction & XFER_READ) && iocommand.buf_size > 0) { /* Copy the data out of the buffer we created */ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { @@ -5082,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) status = -ENOMEM; goto cleanup1; } - if (ioc->Request.Type.Direction == XFER_WRITE) { + if (ioc->Request.Type.Direction & XFER_WRITE) { if (copy_from_user(buff[sg_used], data_ptr, sz)) { status = -ENOMEM; goto cleanup1; @@ -5134,7 +5142,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) status = -EFAULT; goto cleanup0; } - if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { + if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < sg_used; i++) { @@ -5438,13 +5446,12 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) /* Takes cmds off the submission queue and sends them to the hardware, * then puts them on the queue of cmds waiting for completion. + * Assumes h->lock is held */ -static void start_io(struct ctlr_info *h) +static void start_io(struct ctlr_info *h, unsigned long *flags) { struct CommandList *c; - unsigned long flags; - spin_lock_irqsave(&h->lock, flags); while (!list_empty(&h->reqQ)) { c = list_entry(h->reqQ.next, struct CommandList, list); /* can't do anything if fifo is full */ @@ -5467,14 +5474,20 @@ static void start_io(struct ctlr_info *h) * condition. 
*/ h->commands_outstanding++; - if (h->commands_outstanding > h->max_outstanding) - h->max_outstanding = h->commands_outstanding; /* Tell the controller execute command */ - spin_unlock_irqrestore(&h->lock, flags); + spin_unlock_irqrestore(&h->lock, *flags); h->access.submit_command(h, c); - spin_lock_irqsave(&h->lock, flags); + spin_lock_irqsave(&h->lock, *flags); } +} + +static void lock_and_start_io(struct ctlr_info *h) +{ + unsigned long flags; + + spin_lock_irqsave(&h->lock, flags); + start_io(h, &flags); spin_unlock_irqrestore(&h->lock, flags); } @@ -5542,7 +5555,7 @@ static inline void finish_cmd(struct CommandList *c) else if (c->cmd_type == CMD_IOCTL_PEND) complete(c->waiting); if (unlikely(io_may_be_stalled)) - start_io(h); + lock_and_start_io(h); } static inline u32 hpsa_tag_contains_index(u32 tag) @@ -5819,12 +5832,12 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, dev_info(&pdev->dev, "using doorbell to reset controller\n"); writel(use_doorbell, vaddr + SA5_DOORBELL); - /* PMC hardware guys tell us we need a 5 second delay after + /* PMC hardware guys tell us we need a 10 second delay after * doorbell reset and before any attempt to talk to the board * at all to ensure that this actually works and doesn't fall * over in some weird corner cases. */ - msleep(5000); + msleep(10000); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power @@ -6145,6 +6158,8 @@ static void hpsa_interrupt_mode(struct ctlr_info *h) if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { dev_info(&h->pdev->dev, "MSIX\n"); h->msix_vector = MAX_REPLY_QUEUES; + if (h->msix_vector > num_online_cpus()) + h->msix_vector = num_online_cpus(); err = pci_enable_msix(h->pdev, hpsa_msix_entries, h->msix_vector); if (err > 0) { @@ -6594,6 +6609,17 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h) h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); } +static void hpsa_irq_affinity_hints(struct ctlr_info *h) +{ + int i, cpu, rc; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < h->msix_vector; i++) { + rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } +} + static int hpsa_request_irq(struct ctlr_info *h, irqreturn_t (*msixhandler)(int, void *), irqreturn_t (*intxhandler)(int, void *)) @@ -6613,6 +6639,7 @@ static int hpsa_request_irq(struct ctlr_info *h, rc = request_irq(h->intr[i], msixhandler, 0, h->devname, &h->q[i]); + hpsa_irq_affinity_hints(h); } else { /* Use single reply pool */ if (h->msix_vector > 0 || h->msi_vector) { @@ -6664,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h) if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { /* Single reply queue, only one irq to free */ i = h->intr_mode; + irq_set_affinity_hint(h->intr[i], NULL); free_irq(h->intr[i], &h->q[i]); return; } - for (i = 0; i < h->msix_vector; i++) + for (i = 0; i < h->msix_vector; i++) { + irq_set_affinity_hint(h->intr[i], NULL); free_irq(h->intr[i], &h->q[i]); + } } static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) @@ -6686,6 +6716,20 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) #endif /* CONFIG_PCI_MSI */ } +static void hpsa_free_reply_queues(struct ctlr_info *h) +{ + int i; + + for (i = 0; i < h->nreply_queues; i++) { + if (!h->reply_queue[i].head) + continue; + pci_free_consistent(h->pdev, h->reply_queue_size, + h->reply_queue[i].head, h->reply_queue[i].busaddr); + h->reply_queue[i].head = NULL; + h->reply_queue[i].busaddr = 0; + } +} + static void 
hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) { hpsa_free_irqs_and_disable_msix(h); @@ -6693,8 +6737,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) hpsa_free_cmd_pool(h); kfree(h->ioaccel1_blockFetchTable); kfree(h->blockFetchTable); - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); + hpsa_free_reply_queues(h); if (h->vaddr) iounmap(h->vaddr); if (h->transtable) @@ -6719,16 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list) } } +static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) +{ + int i, cpu; + + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < num_online_cpus(); i++) { + u32 *lockup_detected; + lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); + *lockup_detected = value; + cpu = cpumask_next(cpu, cpu_online_mask); + } + wmb(); /* be sure the per-cpu variables are out to memory */ +} + static void controller_lockup_detected(struct ctlr_info *h) { unsigned long flags; + u32 lockup_detected; h->access.set_intr_mask(h, HPSA_INTR_OFF); spin_lock_irqsave(&h->lock, flags); - h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); + if (!lockup_detected) { + /* no heartbeat, but controller gave us a zero. */ + dev_warn(&h->pdev->dev, + "lockup detected but scratchpad register is zero\n"); + lockup_detected = 0xffffffff; + } + set_lockup_detected_for_all_cpus(h, lockup_detected); spin_unlock_irqrestore(&h->lock, flags); dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", - h->lockup_detected); + lockup_detected); pci_disable_device(h->pdev); spin_lock_irqsave(&h->lock, flags); fail_all_cmds_on_list(h, &h->cmpQ); @@ -6863,7 +6928,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work) struct ctlr_info *h = container_of(to_delayed_work(work), struct ctlr_info, monitor_ctlr_work); detect_controller_lockup(h); - if (h->lockup_detected) + if (lockup_detected(h)) return; if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { @@ -6913,7 +6978,6 @@ reinit_after_soft_reset: * the 5 lower bits of the address are used by the hardware. and by * the driver. See comments in hpsa.h for more info. 
*/ -#define COMMANDLIST_ALIGNMENT 128 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); h = kzalloc(sizeof(*h), GFP_KERNEL); if (!h) @@ -6928,6 +6992,13 @@ reinit_after_soft_reset: spin_lock_init(&h->offline_device_lock); spin_lock_init(&h->scan_lock); spin_lock_init(&h->passthru_count_lock); + + /* Allocate and clear per-cpu variable lockup_detected */ + h->lockup_detected = alloc_percpu(u32); + if (!h->lockup_detected) + goto clean1; + set_lockup_detected_for_all_cpus(h, 0); + rc = hpsa_pci_init(h); if (rc != 0) goto clean1; @@ -7051,6 +7122,8 @@ clean4: free_irqs(h); clean2: clean1: + if (h->lockup_detected) + free_percpu(h->lockup_detected); kfree(h); return rc; } @@ -7059,16 +7132,10 @@ static void hpsa_flush_cache(struct ctlr_info *h) { char *flush_buf; struct CommandList *c; - unsigned long flags; /* Don't bother trying to flush the cache if locked up */ - spin_lock_irqsave(&h->lock, flags); - if (unlikely(h->lockup_detected)) { - spin_unlock_irqrestore(&h->lock, flags); + if (unlikely(lockup_detected(h))) return; - } - spin_unlock_irqrestore(&h->lock, flags); - flush_buf = kzalloc(4, GFP_KERNEL); if (!flush_buf) return; @@ -7144,8 +7211,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); + hpsa_free_reply_queues(h); kfree(h->cmd_pool_bits); kfree(h->blockFetchTable); kfree(h->ioaccel1_blockFetchTable); @@ -7153,6 +7219,7 @@ static void hpsa_remove_one(struct pci_dev *pdev) kfree(h->hba_inquiry_data); pci_disable_device(pdev); pci_release_regions(pdev); + free_percpu(h->lockup_detected); kfree(h); } @@ -7257,8 +7324,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) * 10 = 6 s/g entry or 24k */ + /* If the controller supports either ioaccel method then + * we can also use the RAID stack submit path that does not + * perform the superfluous readl() after each command submission. + */ + if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) + access = SA5_performant_access_no_read; + /* Controller spec: zero out this buffer. */ - memset(h->reply_pool, 0, h->reply_pool_size); + for (i = 0; i < h->nreply_queues; i++) + memset(h->reply_queue[i].head, 0, h->reply_queue_size); bft[7] = SG_ENTRIES_IN_CMD + 4; calc_bucket_map(bft, ARRAY_SIZE(bft), @@ -7274,8 +7349,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) for (i = 0; i < h->nreply_queues; i++) { writel(0, &h->transtable->RepQAddr[i].upper); - writel(h->reply_pool_dhandle + - (h->max_commands * sizeof(u64) * i), + writel(h->reply_queue[i].busaddr, &h->transtable->RepQAddr[i].lower); } @@ -7323,8 +7397,10 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) h->ioaccel1_blockFetchTable); /* initialize all reply queue entries to unused */ - memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED, - h->reply_pool_size); + for (i = 0; i < h->nreply_queues; i++) + memset(h->reply_queue[i].head, + (u8) IOACCEL_MODE1_REPLY_UNUSED, + h->reply_queue_size); /* set all the constant fields in the accelerator command * frames once at init time to save CPU cycles later. @@ -7386,7 +7462,6 @@ static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) * because the 7 lower bits of the address are used by the * hardware. 
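set_lockup_detected_for_all_cpus() above replaces the single h->lockup_detected word with one copy per CPU, written in a loop and then published with wmb(), so hot paths can poll a CPU-local flag without taking h->lock; the hpsa_flush_cache() hunk shows the payoff, where the spin_lock_irqsave/unlock pair around the check simply disappears. A hedged C11 model of that publish/poll split, with an array and a release fence standing in for the kernel's per-cpu API and wmb():

#include <stdatomic.h>
#include <stdio.h>

#define NCPU 4

/* One flag per CPU, like the alloc_percpu(u32) in the hunk. */
static _Atomic unsigned flag[NCPU];

static void set_for_all_cpus(unsigned value)
{
    for (int cpu = 0; cpu < NCPU; cpu++)
        atomic_store_explicit(&flag[cpu], value, memory_order_relaxed);
    /* Publish: analogous to the wmb() after the per-cpu writes. */
    atomic_thread_fence(memory_order_release);
}

/* Hot path reads only its own CPU's copy -- no shared lock, no
 * cache-line bouncing between CPUs. */
static unsigned lockup_detected_on(int cpu)
{
    return atomic_load_explicit(&flag[cpu], memory_order_relaxed);
}

int main(void)
{
    set_for_all_cpus(0xffffffffu); /* 0 would mean "no lockup" */
    printf("cpu1 sees 0x%x\n", lockup_detected_on(1));
    return 0;
}

Note also the substitution of 0xffffffff when the scratchpad register reads back zero: the per-CPU value doubles as a boolean, so a zero "lockup code" would otherwise be indistinguishable from "no lockup".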
*/ -#define IOACCEL1_COMMANDLIST_ALIGNMENT 128 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % IOACCEL1_COMMANDLIST_ALIGNMENT); h->ioaccel_cmd_pool = @@ -7424,7 +7499,6 @@ static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; -#define IOACCEL2_COMMANDLIST_ALIGNMENT 128 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % IOACCEL2_COMMANDLIST_ALIGNMENT); h->ioaccel2_cmd_pool = @@ -7482,16 +7556,17 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) } } - /* TODO, check that this next line h->nreply_queues is correct */ h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; hpsa_get_max_perf_mode_cmds(h); /* Performant mode ring buffer and supporting data structures */ - h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; - h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, - &(h->reply_pool_dhandle)); + h->reply_queue_size = h->max_commands * sizeof(u64); for (i = 0; i < h->nreply_queues; i++) { - h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; + h->reply_queue[i].head = pci_alloc_consistent(h->pdev, + h->reply_queue_size, + &(h->reply_queue[i].busaddr)); + if (!h->reply_queue[i].head) + goto clean_up; h->reply_queue[i].size = h->max_commands; h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ h->reply_queue[i].current_entry = 0; @@ -7500,18 +7575,14 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) /* Need a block fetch table for performant mode */ h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * sizeof(u32)), GFP_KERNEL); - - if ((h->reply_pool == NULL) - || (h->blockFetchTable == NULL)) + if (!h->blockFetchTable) goto clean_up; hpsa_enter_performant_mode(h, trans_support); return; clean_up: - if (h->reply_pool) - pci_free_consistent(h->pdev, h->reply_pool_size, - h->reply_pool, h->reply_pool_dhandle); + hpsa_free_reply_queues(h); kfree(h->blockFetchTable); } diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 44235a27e1b6..24472cec7de3 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -57,11 +57,12 @@ struct hpsa_scsi_dev_t { }; -struct reply_pool { +struct reply_queue_buffer { u64 *head; size_t size; u8 wraparound; u32 current_entry; + dma_addr_t busaddr; }; #pragma pack(1) @@ -90,6 +91,7 @@ struct bmic_controller_parameters { u8 automatic_drive_slamming; u8 reserved1; u8 nvram_flags; +#define HBA_MODE_ENABLED_FLAG (1 << 3) u8 cache_nvram_flags; u8 drive_config_flags; u16 reserved2; @@ -115,11 +117,8 @@ struct ctlr_info { int nr_cmds; /* Number of commands allowed on this controller */ struct CfgTable __iomem *cfgtable; int interrupts_enabled; - int major; int max_commands; int commands_outstanding; - int max_outstanding; /* Debug */ - int usage_count; /* number of opens all all minor devices */ # define PERF_MODE_INT 0 # define DOORBELL_INT 1 # define SIMPLE_MODE_INT 2 @@ -176,11 +175,9 @@ struct ctlr_info { /* * Performant mode completion buffers */ - u64 *reply_pool; - size_t reply_pool_size; - struct reply_pool reply_queue[MAX_REPLY_QUEUES]; + size_t reply_queue_size; + struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES]; u8 nreply_queues; - dma_addr_t reply_pool_dhandle; u32 *blockFetchTable; u32 *ioaccel1_blockFetchTable; u32 *ioaccel2_blockFetchTable; @@ -195,7 +192,7 @@ struct ctlr_info { u64 last_heartbeat_timestamp; u32 heartbeat_sample_interval; atomic_t firmware_flash_in_progress; - u32 lockup_detected; + u32 *lockup_detected; struct delayed_work monitor_ctlr_work; 
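The reply-queue rework above trades one large reply_pool, sliced at max_commands intervals, for a separately allocated buffer (and bus address) per queue, and hpsa_free_reply_queues() tolerates a partially allocated state by skipping NULL heads. A compact sketch of that allocate/unwind shape, with malloc() standing in for pci_alloc_consistent() and illustrative sizes:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define MAX_QUEUES 4

/* Models struct reply_queue_buffer: each queue now owns its buffer
 * instead of pointing into one shared pool. */
struct reply_queue {
    unsigned long long *head;
    size_t size;
};

static struct reply_queue rq[MAX_QUEUES];

static void free_reply_queues(void)
{
    for (int i = 0; i < MAX_QUEUES; i++) {
        if (!rq[i].head)
            continue;           /* tolerate partial allocation */
        free(rq[i].head);
        rq[i].head = NULL;
    }
}

static int alloc_reply_queues(size_t max_commands)
{
    size_t bytes = max_commands * sizeof(unsigned long long);

    for (int i = 0; i < MAX_QUEUES; i++) {
        rq[i].head = malloc(bytes);
        if (!rq[i].head) {
            free_reply_queues(); /* unwind whatever succeeded */
            return -1;
        }
        memset(rq[i].head, 0, bytes); /* spec: zero before use */
        rq[i].size = max_commands;
    }
    return 0;
}

int main(void)
{
    printf("alloc: %d\n", alloc_reply_queues(32));
    free_reply_queues();
    return 0;
}

Per-queue allocation also stops requiring one big contiguous DMA region, which matters once MAX_REPLY_QUEUES is raised from 8 to 64 later in this diff.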
int remove_in_progress; u32 fifo_recently_full; @@ -232,11 +229,9 @@ struct ctlr_info { #define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE (1 << 31) #define RESCAN_REQUIRED_EVENT_BITS \ - (CTLR_STATE_CHANGE_EVENT | \ - CTLR_ENCLOSURE_HOT_PLUG_EVENT | \ + (CTLR_ENCLOSURE_HOT_PLUG_EVENT | \ CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \ CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \ - CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL | \ CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \ CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE) spinlock_t offline_device_lock; @@ -345,22 +340,23 @@ struct offline_device_entry { static void SA5_submit_command(struct ctlr_info *h, struct CommandList *c) { - dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, - c->Header.Tag.lower); writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); } +static void SA5_submit_command_no_read(struct ctlr_info *h, + struct CommandList *c) +{ + writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); +} + static void SA5_submit_command_ioaccel2(struct ctlr_info *h, struct CommandList *c) { - dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, - c->Header.Tag.lower); if (c->cmd_type == CMD_IOACCEL2) writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); else writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); - (void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); } /* @@ -398,7 +394,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val) static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) { - struct reply_pool *rq = &h->reply_queue[q]; + struct reply_queue_buffer *rq = &h->reply_queue[q]; unsigned long flags, register_value = FIFO_EMPTY; /* msi auto clears the interrupt pending bit. */ @@ -477,7 +473,6 @@ static bool SA5_intr_pending(struct ctlr_info *h) { unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); - dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value); return register_value & SA5_INTR_PENDING; } @@ -514,7 +509,7 @@ static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) { u64 register_value; - struct reply_pool *rq = &h->reply_queue[q]; + struct reply_queue_buffer *rq = &h->reply_queue[q]; unsigned long flags; BUG_ON(q >= h->nreply_queues); @@ -572,6 +567,14 @@ static struct access_method SA5_performant_access = { SA5_performant_completed, }; +static struct access_method SA5_performant_access_no_read = { + SA5_submit_command_no_read, + SA5_performant_intr_mask, + SA5_fifo_full, + SA5_performant_intr_pending, + SA5_performant_completed, +}; + struct board_type { u32 board_id; char *product_name; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index b5cc7052339f..b5125dc31439 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -151,7 +151,7 @@ #define HPSA_VPD_HEADER_SZ 4 /* Logical volume states */ -#define HPSA_VPD_LV_STATUS_UNSUPPORTED -1 +#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff #define HPSA_LV_OK 0x0 #define HPSA_LV_UNDERGOING_ERASE 0x0F #define HPSA_LV_UNDERGOING_RPI 0x12 @@ -238,11 +238,21 @@ struct ReportLUNdata { u8 LUN[HPSA_MAX_LUN][8]; }; +struct ext_report_lun_entry { + u8 lunid[8]; + u8 wwid[8]; + u8 device_type; + u8 device_flags; + u8 lun_count; /* multi-lun device, how many luns */ + u8 redundant_paths; + u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ +}; + struct ReportExtendedLUNdata { u8 LUNListLength[4]; u8 extended_response_flag; u8 reserved[3]; - u8 
LUN[HPSA_MAX_LUN][24]; + struct ext_report_lun_entry LUN[HPSA_MAX_LUN]; }; struct SenseSubsystem_info { @@ -375,6 +385,7 @@ struct ctlr_info; /* defined in hpsa.h */ * or a bus address. */ +#define COMMANDLIST_ALIGNMENT 128 struct CommandList { struct CommandListHeader Header; struct RequestBlock Request; @@ -389,21 +400,7 @@ struct CommandList { struct list_head list; struct completion *waiting; void *scsi_cmd; - -/* on 64 bit architectures, to get this to be 32-byte-aligned - * it so happens we need PAD_64 bytes of padding, on 32 bit systems, - * we need PAD_32 bytes of padding (see below). This does that. - * If it happens that 64 bit and 32 bit systems need different - * padding, PAD_32 and PAD_64 can be set independently, and. - * the code below will do the right thing. - */ -#define IS_32_BIT ((8 - sizeof(long))/4) -#define IS_64_BIT (!IS_32_BIT) -#define PAD_32 (40) -#define PAD_64 (12) -#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) - u8 pad[COMMANDLIST_PAD]; -}; +} __aligned(COMMANDLIST_ALIGNMENT); /* Max S/G elements in I/O accelerator command */ #define IOACCEL1_MAXSGENTRIES 24 @@ -413,6 +410,7 @@ struct CommandList { * Structure for I/O accelerator (mode 1) commands. * Note that this structure must be 128-byte aligned in size. */ +#define IOACCEL1_COMMANDLIST_ALIGNMENT 128 struct io_accel1_cmd { u16 dev_handle; /* 0x00 - 0x01 */ u8 reserved1; /* 0x02 */ @@ -440,12 +438,7 @@ struct io_accel1_cmd { struct vals32 host_addr; /* 0x70 - 0x77 */ u8 CISS_LUN[8]; /* 0x78 - 0x7F */ struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; -#define IOACCEL1_PAD_64 0 -#define IOACCEL1_PAD_32 0 -#define IOACCEL1_PAD (IS_32_BIT * IOACCEL1_PAD_32 + \ - IS_64_BIT * IOACCEL1_PAD_64) - u8 pad[IOACCEL1_PAD]; -}; +} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); #define IOACCEL1_FUNCTION_SCSIIO 0x00 #define IOACCEL1_SGLOFFSET 32 @@ -510,14 +503,11 @@ struct io_accel2_scsi_response { u8 sense_data_buff[32]; /* sense/response data buffer */ }; -#define IOACCEL2_64_PAD 76 -#define IOACCEL2_32_PAD 76 -#define IOACCEL2_PAD (IS_32_BIT * IOACCEL2_32_PAD + \ - IS_64_BIT * IOACCEL2_64_PAD) /* * Structure for I/O accelerator (mode 2 or m2) commands. * Note that this structure must be 128-byte aligned in size. 
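SA5_performant_access_no_read, added above, is a second struct access_method that differs from SA5_performant_access only in its submit hook, letting hpsa_enter_performant_mode() drop the posted-write-flushing readl() on controllers where, per the hunk's comment, ioaccel support makes it superfluous. The underlying idiom is a plain C ops table; an illustrative reduction:

#include <stdio.h>

struct dev;                     /* forward declaration for the ops table */

struct access_method {
    void (*submit_command)(struct dev *d, unsigned busaddr);
};

struct dev {
    const struct access_method *access;
};

static void submit_with_flush(struct dev *d, unsigned busaddr)
{
    (void)d;
    printf("write 0x%x, then read scratchpad to flush the posted write\n",
           busaddr);
}

static void submit_no_read(struct dev *d, unsigned busaddr)
{
    (void)d;
    printf("write 0x%x only\n", busaddr);
}

static const struct access_method performant = { submit_with_flush };
static const struct access_method performant_no_read = { submit_no_read };

int main(void)
{
    struct dev d = { &performant };

    d.access->submit_command(&d, 0x1000);
    d.access = &performant_no_read; /* chosen when ioaccel is supported */
    d.access->submit_command(&d, 0x2000);
    return 0;
}

Swapping one pointer changes the behavior of every submission site, which is exactly how the driver selects the cheaper path per controller rather than per command.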
*/ +#define IOACCEL2_COMMANDLIST_ALIGNMENT 128 struct io_accel2_cmd { u8 IU_type; /* IU Type */ u8 direction; /* direction, memtype, and encryption */ @@ -544,8 +534,7 @@ struct io_accel2_cmd { u32 tweak_upper; /* Encryption tweak, upper 4 bytes */ struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; struct io_accel2_scsi_response error_data; - u8 pad[IOACCEL2_PAD]; -}; +} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); /* * defines for Mode 2 command struct @@ -636,7 +625,7 @@ struct TransTable_struct { u32 RepQCount; u32 RepQCtrAddrLow32; u32 RepQCtrAddrHigh32; -#define MAX_REPLY_QUEUES 8 +#define MAX_REPLY_QUEUES 64 struct vals32 RepQAddr[MAX_REPLY_QUEUES]; }; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 23f5ba5e6472..8dd47689d584 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4515,7 +4515,7 @@ static int ibmvfc_work(void *data) struct ibmvfc_host *vhost = data; int rc; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); while (1) { rc = wait_event_interruptible(vhost->work_wait_q, diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index fa764406df68..7b23f21f22f1 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) if (crq->valid & 0x80) { if (++queue->cur == queue->size) queue->cur = 0; + + /* Ensure the read of the valid bit occurs before reading any + * other bits of the CRQ entry + */ + rmb(); } else crq = NULL; spin_unlock_irqrestore(&queue->lock, flags); @@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata, { struct vio_dev *vdev = to_vio_dev(hostdata->dev); + /* + * Ensure the command buffer is flushed to memory before handing it + * over to the VIOS to prevent it from fetching any stale data. 
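Replacing the hand-computed PAD_32/PAD_64 members with __aligned(128) on the struct type works because sizeof() is always a multiple of a type's alignment, so the compiler now supplies the trailing pad the macros used to compute per word size. A self-contained check of that property (GCC/Clang attribute; the layout is illustrative, not the real CommandList):

#include <stdio.h>

#define CMD_ALIGN 128

/* Illustrative fields only -- not the driver's actual structure. */
struct cmd {
    unsigned header[4];
    unsigned char cdb[16];
    void *scsi_cmd;
} __attribute__((aligned(CMD_ALIGN)));

/* Equivalent of the driver's BUILD_BUG_ON(sizeof(...) % ALIGNMENT). */
_Static_assert(sizeof(struct cmd) % CMD_ALIGN == 0, "missing tail pad");

int main(void)
{
    struct cmd pool[2];

    /* Adjacent array elements keep the 128-byte stride the hardware
     * expects, with no hand-written pad[] member. */
    printf("sizeof=%zu stride=%td\n", sizeof(struct cmd),
           (char *)&pool[1] - (char *)&pool[0]);
    return 0;
}

The same change is applied to io_accel1_cmd and io_accel2_cmd, which is why all three IS_32_BIT/PAD blocks can be deleted wholesale.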
+ */ + mb(); return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); } @@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code) evt->hostdata->dev); if (evt->cmnd_done) evt->cmnd_done(evt->cmnd); - } else if (evt->done) + } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && + evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ) evt->done(evt); free_event_struct(&evt->hostdata->pool, evt); spin_lock_irqsave(hostdata->host->host_lock, flags); @@ -2213,7 +2224,7 @@ static int ibmvscsi_work(void *data) struct ibmvscsi_host_data *hostdata = data; int rc; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); while (1) { rc = wait_event_interruptible(hostdata->work_wait_q, diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 96a26f454673..cc51f38b116d 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1541,7 +1541,7 @@ void isci_remote_device_release(struct kref *kref) clear_bit(IDEV_STOP_PENDING, &idev->flags); clear_bit(IDEV_IO_READY, &idev->flags); clear_bit(IDEV_GONE, &idev->flags); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(IDEV_ALLOCATED, &idev->flags); wake_up(&ihost->eventq); } diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 11854845393b..a669f2d11c31 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -244,7 +244,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn) sk->sk_data_ready = tcp_sw_conn->old_data_ready; sk->sk_state_change = tcp_sw_conn->old_state_change; sk->sk_write_space = tcp_sw_conn->old_write_space; - sk->sk_no_check = 0; + sk->sk_no_check_tx = 0; write_unlock_bh(&sk->sk_callback_lock); } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 26dc005bb0f0..3d1bc67bac9d 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; struct iscsi_scsi_req *hdr; - unsigned hdrlength, cmd_len; + unsigned hdrlength, cmd_len, transfer_length; itt_t itt; int rc; @@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) task->protected = true; + transfer_length = scsi_transfer_length(sc); + hdr->data_length = cpu_to_be32(transfer_length); if (sc->sc_data_direction == DMA_TO_DEVICE) { - unsigned out_len = scsi_out(sc)->length; struct iscsi_r2t_info *r2t = &task->unsol_r2t; - hdr->data_length = cpu_to_be32(out_len); hdr->flags |= ISCSI_FLAG_CMD_WRITE; /* * Write counters: @@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) memset(r2t, 0, sizeof(*r2t)); if (session->imm_data_en) { - if (out_len >= session->first_burst) + if (transfer_length >= session->first_burst) task->imm_count = min(session->first_burst, conn->max_xmit_dlength); else - task->imm_count = min(out_len, - conn->max_xmit_dlength); + task->imm_count = min(transfer_length, + conn->max_xmit_dlength); hton24(hdr->dlength, task->imm_count); } else zero_data(hdr->dlength); if (!session->initial_r2t_en) { - r2t->data_length = min(session->first_burst, out_len) - + r2t->data_length = min(session->first_burst, + transfer_length) - task->imm_count; r2t->data_offset = task->imm_count; r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); @@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) } else { hdr->flags 
|= ISCSI_FLAG_CMD_FINAL; zero_data(hdr->dlength); - hdr->data_length = cpu_to_be32(scsi_in(sc)->length); if (sc->sc_data_direction == DMA_FROM_DEVICE) hdr->flags |= ISCSI_FLAG_CMD_READ; @@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) scsi_bidi_cmnd(sc) ? "bidirectional" : sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read", conn->id, sc, sc->cmnd[0], - task->itt, scsi_bufflen(sc), + task->itt, transfer_length, scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0, session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1); @@ -1442,9 +1442,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) conn->task = NULL; } /* regular RX path uses back_lock */ - spin_lock_bh(&conn->session->back_lock); + spin_lock(&conn->session->back_lock); __iscsi_put_task(task); - spin_unlock_bh(&conn->session->back_lock); + spin_unlock(&conn->session->back_lock); return rc; } diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 94a3cafe7197..434e9037908e 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -640,6 +640,7 @@ struct lpfc_hba { #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ #define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ +#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 8d5b6ceec9c9..1d7a5c34ee8c 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
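The two ibmvscsi hunks earlier are halves of one ordering contract: the consumer in crq_queue_next_crq() must not read a CRQ entry's payload until it has seen the valid bit (the added rmb()), and the producer in ibmvscsi_send_crq() must flush the command buffer to memory before the H_SEND_CRQ doorbell lets the VIOS fetch it (the added mb()). The same reasoning reappears later in this diff in lpfc_sli4_eq_get()/lpfc_sli4_cq_get(). A hedged C11 model of the pair, with acquire/release fences standing in for the kernel barriers:

#include <stdatomic.h>
#include <string.h>
#include <stdio.h>

/* One ring entry; the valid bit is what both sides synchronize on. */
struct crq_entry {
    _Atomic unsigned char valid;
    char payload[15];
};

/* Producer (~ ibmvscsi_send_crq): fill payload, fence, mark valid.
 * The fence plays the role of mb() before the hypercall. */
static void post(struct crq_entry *e, const char *msg)
{
    strcpy(e->payload, msg);
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&e->valid, 0x80, memory_order_relaxed);
}

/* Consumer (~ crq_queue_next_crq): check valid, fence, then read.
 * The fence plays the role of the added rmb(). */
static int poll_entry(struct crq_entry *e, char *out, size_t len)
{
    if (!(atomic_load_explicit(&e->valid, memory_order_relaxed) & 0x80))
        return 0;
    atomic_thread_fence(memory_order_acquire);
    memcpy(out, e->payload, len);
    return 1;
}

int main(void)
{
    struct crq_entry e = { 0 };
    char buf[15] = "";

    post(&e, "login_rsp");
    printf("ready=%d payload=%s\n", poll_entry(&e, buf, sizeof(buf)), buf);
    return 0;
}

Without the consumer-side barrier, speculative loads may fetch stale payload bytes before the valid-bit check retires, which is precisely the failure mode the lpfc cq_get() comment later in this diff describes.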
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -919,10 +919,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) phba->cfg_sriov_nr_virtfn = 0; } + if (opcode == LPFC_FW_DUMP) + phba->hba_flag |= HBA_FW_DUMP_OP; + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); - if (status != 0) + if (status != 0) { + phba->hba_flag &= ~HBA_FW_DUMP_OP; return status; + } /* wait for the device to be quiesced before firmware reset */ msleep(100); @@ -2364,7 +2369,7 @@ lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr, uint8_t wwpn[WWN_SZ]; int rc; - if (!phba->cfg_EnableXLane) + if (!phba->cfg_fof) return -EPERM; /* count may include a LF at end of string */ @@ -2432,7 +2437,7 @@ lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, uint8_t wwpn[WWN_SZ]; int rc; - if (!phba->cfg_EnableXLane) + if (!phba->cfg_fof) return -EPERM; /* count may include a LF at end of string */ @@ -2499,7 +2504,7 @@ lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; int val = 0; - if (!phba->cfg_EnableXLane) + if (!phba->cfg_fof) return -EPERM; if (!isdigit(buf[0])) @@ -2565,7 +2570,7 @@ lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], int rc = 0; - if (!phba->cfg_EnableXLane) + if (!phba->cfg_fof) return -EPERM; if (oas_state) { @@ -2670,7 +2675,7 @@ lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, uint64_t oas_lun; int len = 0; - if (!phba->cfg_EnableXLane) + if (!phba->cfg_fof) return -EPERM; if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) @@ -2716,7 +2721,7 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, uint64_t scsi_lun; ssize_t rc; - if (!phba->cfg_EnableXLane) + if (!phba->cfg_fof) return -EPERM; if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) @@ -4655,7 +4660,7 @@ LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); # 0x0 - 0x7f = CS_CTL field in FC header (high 7 bits) # Value range is [0x0,0x7f]. Default value is 0 */ -LPFC_ATTR_R(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); +LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); /* # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF) diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index ca2f4ea7cdef..5b5c825d9576 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2013 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index a94d4c9dfaa5..928ef609f363 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2010-2012 Emulex. All rights reserved. * + * Copyright (C) 2010-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
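Returning to the libiscsi hunk above: hdr->data_length now comes from scsi_transfer_length() rather than the raw buffer length because, when T10 protection information rides along, the wire length also carries an 8-byte PI tuple per protection interval, so the transfer is larger than the data buffer the midlayer sees. A hedged sketch of that arithmetic, assuming a 512-byte protection interval:

#include <stdio.h>

/* Assumed: 512-byte protection interval, 8-byte tuple (T10 DIF). */
static unsigned transfer_length(unsigned buf_len, int prot_enabled)
{
    const unsigned interval = 512, tuple = 8;

    if (!prot_enabled)
        return buf_len;
    /* Wire length = data + one PI tuple per interval. */
    return buf_len + (buf_len / interval) * tuple;
}

int main(void)
{
    printf("64KiB, no PI:   %u\n", transfer_length(65536, 0));
    printf("64KiB, with PI: %u\n", transfer_length(65536, 1));
    return 0;
}

Computing the value once also lets the hunk hoist the assignment above the read/write branch, removing the duplicated cpu_to_be32() calls on each path.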
* * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index adda0bf7a244..db5604f01a1a 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -289,6 +289,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); +void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba); void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, @@ -310,6 +311,9 @@ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *, int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd); int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t, uint64_t, lpfc_ctx_cmd); +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *, + uint16_t, uint64_t, lpfc_ctx_cmd); void lpfc_mbox_timeout(unsigned long); void lpfc_mbox_timeout_handler(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 828c08e9389e..b0aedce3f54b 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2012 Emulex. All rights reserved. * + * Copyright (C) 2007-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -2314,7 +2314,7 @@ proc_cq: goto too_big; } - if (phba->cfg_EnableXLane) { + if (phba->cfg_fof) { /* OAS CQ */ qp = phba->sli4_hba.oas_cq; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 624fe0b3cc0b..7a5d81a65be8 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 59b51c529ba0..2a17e31265b8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -731,7 +731,7 @@ lpfc_do_work(void *p) struct lpfc_hba *phba = p; int rc; - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); current->flags |= PF_NOFREEZE; phba->data_flags = 0; @@ -5634,6 +5634,9 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->active_rrqs_xri_bitmap = mempool_alloc(vport->phba->active_rrq_pool, GFP_KERNEL); + if (ndlp->active_rrqs_xri_bitmap) + memset(ndlp->active_rrqs_xri_bitmap, 0, + ndlp->phba->cfg_rrq_xri_bitmap_sz); } diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 3d9438ce59ab..236259252379 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index fd79f7de7666..f432ec180cf8 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2013 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 635eeb3d6987..06f9a5b79e66 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -820,57 +820,153 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) } /** - * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free + * rspiocb which got deferred + * * @phba: pointer to lpfc HBA data structure. * - * This routine will do uninitialization after the HBA is reset when bring - * down the SLI Layer. + * This routine will cleanup completed slow path events after HBA is reset + * when bringing down the SLI Layer. + * * * Return codes - * 0 - success. - * Any other value - error. + * void. 
**/ -static int -lpfc_hba_down_post_s3(struct lpfc_hba *phba) +static void +lpfc_sli4_free_sp_events(struct lpfc_hba *phba) +{ + struct lpfc_iocbq *rspiocbq; + struct hbq_dmabuf *dmabuf; + struct lpfc_cq_event *cq_event; + + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_SP_QUEUE_EVT; + spin_unlock_irq(&phba->hbalock); + + while (!list_empty(&phba->sli4_hba.sp_queue_event)) { + /* Get the response iocb from the head of work queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_queue_event, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + + switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { + case CQE_CODE_COMPL_WQE: + rspiocbq = container_of(cq_event, struct lpfc_iocbq, + cq_event); + lpfc_sli_release_iocbq(phba, rspiocbq); + break; + case CQE_CODE_RECEIVE: + case CQE_CODE_RECEIVE_V1: + dmabuf = container_of(cq_event, struct hbq_dmabuf, + cq_event); + lpfc_in_buf_free(phba, &dmabuf->dbuf); + } + } +} + +/** + * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup posted ELS buffers after the HBA is reset + * when bringing down the SLI Layer. + * + * + * Return codes + * void. + **/ +static void +lpfc_hba_free_post_buf(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *mp, *next_mp; - LIST_HEAD(completions); - int i; + LIST_HEAD(buflist); + int count; if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) lpfc_sli_hbqbuf_free_all(phba); else { /* Cleanup preposted buffers on the ELS ring */ pring = &psli->ring[LPFC_ELS_RING]; - list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { + spin_lock_irq(&phba->hbalock); + list_splice_init(&pring->postbufq, &buflist); + spin_unlock_irq(&phba->hbalock); + + count = 0; + list_for_each_entry_safe(mp, next_mp, &buflist, list) { list_del(&mp->list); - pring->postbufq_cnt--; + count++; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); } + + spin_lock_irq(&phba->hbalock); + pring->postbufq_cnt -= count; + spin_unlock_irq(&phba->hbalock); } +} + +/** + * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup the txcmplq after the HBA is reset when bringing + * down the SLI Layer. + * + * Return codes + * void + **/ +static void +lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + LIST_HEAD(completions); + int i; - spin_lock_irq(&phba->hbalock); for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - + if (phba->sli_rev >= LPFC_SLI_REV4) + spin_lock_irq(&pring->ring_lock); + else + spin_lock_irq(&phba->hbalock); /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on txcmplq as it will NEVER complete. */ list_splice_init(&pring->txcmplq, &completions); - spin_unlock_irq(&phba->hbalock); + pring->txcmplq_cnt = 0; + + if (phba->sli_rev >= LPFC_SLI_REV4) + spin_unlock_irq(&pring->ring_lock); + else + spin_unlock_irq(&phba->hbalock); /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); - lpfc_sli_abort_iocb_ring(phba, pring); - spin_lock_irq(&phba->hbalock); } - spin_unlock_irq(&phba->hbalock); +} +/** + * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure.
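lpfc_hba_free_post_buf() above drains pring->postbufq by splicing the whole list onto a private head under the lock, freeing entries with the lock dropped, and only then fixing postbufq_cnt under the lock again, so the free path no longer holds hbalock across every lpfc_mbuf_free(). The same shape with a pthread mutex and a singly linked list, all names illustrative:

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *postbufq;
static int postbufq_cnt;

static void drain(void)
{
    struct node *batch, *n;
    int count = 0;

    pthread_mutex_lock(&lock);
    batch = postbufq;           /* ~ list_splice_init() */
    postbufq = NULL;
    pthread_mutex_unlock(&lock);

    while ((n = batch)) {       /* free outside the lock */
        batch = n->next;
        free(n);
        count++;
    }

    pthread_mutex_lock(&lock);  /* adjust the count under the lock */
    postbufq_cnt -= count;
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->next = postbufq;
        postbufq = n;
        postbufq_cnt++;
    }
    drain();
    printf("remaining: %d\n", postbufq_cnt);
    return 0;
}

lpfc_hba_clean_txcmplq() in the same hunk uses the identical splice-then-cancel shape, with the added twist that SLI4 takes the per-ring ring_lock while SLI3 keeps the global hbalock.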
+ * + * This routine will do uninitialization after the HBA is reset when bring + * down the SLI Layer. + * + * Return codes + * 0 - success. + * Any other value - error. + **/ +static int +lpfc_hba_down_post_s3(struct lpfc_hba *phba) +{ + lpfc_hba_free_post_buf(phba); + lpfc_hba_clean_txcmplq(phba); return 0; } @@ -890,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) { struct lpfc_scsi_buf *psb, *psb_next; LIST_HEAD(aborts); - int ret; unsigned long iflag = 0; struct lpfc_sglq *sglq_entry = NULL; - ret = lpfc_hba_down_post_s3(phba); - if (ret) - return ret; + lpfc_hba_free_post_buf(phba); + lpfc_hba_clean_txcmplq(phba); + /* At this point in time the HBA is either reset or DOA. Either * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be * on the lpfc_sgl_list so that it can either be freed if the @@ -932,6 +1027,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + + lpfc_sli4_free_sp_events(phba); return 0; } @@ -1250,7 +1347,6 @@ static void lpfc_handle_deferred_eratt(struct lpfc_hba *phba) { uint32_t old_host_status = phba->work_hs; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli = &phba->sli; /* If the pci channel is offline, ignore possible errors, @@ -1279,8 +1375,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) * dropped by the firmware. Error iocb (I/O) on txcmplq and let the * SCSI layer retry it after re-establishing link. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); /* * There was a firmware error. Take the hba offline and then @@ -1348,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; uint32_t event_data; unsigned long temperature; struct temp_event temp_event_data; @@ -1400,8 +1494,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) * Error iocb (I/O) on txcmplq and let the SCSI layer * retry it after re-establishing link. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); /* * There was a firmware error. 
Take the hba offline and then @@ -1940,78 +2033,81 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) switch (dev_id) { case PCI_DEVICE_ID_FIREFLY: - m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP6000", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SUPERFLY: if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) - m = (typeof(m)){"LP7000", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP7000", "PCI", ""}; else - m = (typeof(m)){"LP7000E", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP7000E", "PCI", ""}; + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; break; case PCI_DEVICE_ID_DRAGONFLY: m = (typeof(m)){"LP8000", "PCI", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_CENTAUR: if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) - m = (typeof(m)){"LP9002", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP9002", "PCI", ""}; else - m = (typeof(m)){"LP9000", "PCI", - "Fibre Channel Adapter"}; + m = (typeof(m)){"LP9000", "PCI", ""}; + m.function = "Obsolete, Unsupported Fibre Channel Adapter"; break; case PCI_DEVICE_ID_RFLY: m = (typeof(m)){"LP952", "PCI", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PEGASUS: m = (typeof(m)){"LP9802", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_THOR: m = (typeof(m)){"LP10000", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_VIPER: m = (typeof(m)){"LPX1000", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PFLY: m = (typeof(m)){"LP982", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TFLY: m = (typeof(m)){"LP1050", "PCI-X", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS: m = (typeof(m)){"LP11000", "PCI-X2", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_SCSP: m = (typeof(m)){"LP11000-SP", "PCI-X2", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HELIOS_DCSP: m = (typeof(m)){"LP11002-SP", "PCI-X2", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE: - m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_SCSP: - m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1000-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_NEPTUNE_DCSP: - m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe1002-SP", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BMID: m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_BSMB: - m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP111", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_ZEPHYR: m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; @@ -2030,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, 
uint8_t *mdp, uint8_t *descp) m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP101: - m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP101", "PCI-X", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP10000S: - m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP10000-S", "PCI", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LP11000S: - m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; + m = (typeof(m)){"LP11000-S", "PCI-X2", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LPE11000S: - m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; + m = (typeof(m)){"LPe11000-S", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_SAT: m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; @@ -2060,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_HORNET: - m = (typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; + m = (typeof(m)){"LP21000", "PCIe", + "Obsolete, Unsupported FCoE Adapter"}; GE = 1; break; case PCI_DEVICE_ID_PROTEUS_VF: m = (typeof(m)){"LPev12000", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_PF: m = (typeof(m)){"LPev12000", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_PROTEUS_S: m = (typeof(m)){"LPemv12002-S", "PCIe IOV", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_TIGERSHARK: oneConnect = 1; @@ -2089,17 +2190,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) break; case PCI_DEVICE_ID_BALIUS: m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", - "Fibre Channel Adapter"}; + "Obsolete, Unsupported Fibre Channel Adapter"}; break; case PCI_DEVICE_ID_LANCER_FC: - case PCI_DEVICE_ID_LANCER_FC_VF: m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"}; break; + case PCI_DEVICE_ID_LANCER_FC_VF: + m = (typeof(m)){"LPe16000", "PCIe", + "Obsolete, Unsupported Fibre Channel Adapter"}; + break; case PCI_DEVICE_ID_LANCER_FCOE: - case PCI_DEVICE_ID_LANCER_FCOE_VF: oneConnect = 1; m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; break; + case PCI_DEVICE_ID_LANCER_FCOE_VF: + oneConnect = 1; + m = (typeof(m)){"OCe15100", "PCIe", + "Obsolete, Unsupported FCoE"}; + break; case PCI_DEVICE_ID_SKYHAWK: case PCI_DEVICE_ID_SKYHAWK_VF: oneConnect = 1; @@ -4614,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba) phba->link_state = LPFC_HBA_ERROR; return; } - lpfc_offline_prep(phba, LPFC_MBX_WAIT); + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) + lpfc_offline_prep(phba, LPFC_MBX_WAIT); + else + lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); lpfc_offline(phba); lpfc_sli_brdrestart(phba); lpfc_online(phba); @@ -9663,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) static void lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2723 PCI channel I/O abort preparing for recovery\n"); @@ -9673,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. 
*/ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); } /** @@ -10417,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) static void lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2828 PCI channel I/O abort preparing for recovery\n"); /* * There may be errored I/Os through HBA, abort all I/Os on txcmplq * and let the SCSI mid-layer to retry them to recover. */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); } /** @@ -10898,7 +11001,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba) if (phba->sli4_hba.pc_sli4_params.oas_supported) { phba->cfg_fof = 1; } else { - phba->cfg_EnableXLane = 0; + phba->cfg_fof = 0; if (phba->device_data_mem_pool) mempool_destroy(phba->device_data_mem_pool); phba->device_data_mem_pool = NULL; @@ -10928,7 +11031,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba) if (rc) return -ENOMEM; - if (phba->cfg_EnableXLane) { + if (phba->cfg_fof) { rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); @@ -10947,8 +11050,7 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba) return 0; out_oas_wq: - if (phba->cfg_EnableXLane) - lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); + lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); out_oas_cq: lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); return rc; @@ -10982,7 +11084,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba) phba->sli4_hba.fof_eq = qdesc; - if (phba->cfg_EnableXLane) { + if (phba->cfg_fof) { /* Create OAS CQ */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index ed419aad2b1f..3fa65338d3f5 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2012 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 462453ee0bda..2df11daad85b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -73,7 +73,7 @@ lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; - if (vport->phba->cfg_EnableXLane) + if (vport->phba->cfg_fof) return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; else return (struct lpfc_rport_data *)sdev->hostdata; @@ -3462,7 +3462,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * If the OAS driver feature is enabled and the lun is enabled for * OAS, set the oas iocb related flags. 
*/ - if ((phba->cfg_EnableXLane) && ((struct lpfc_device_data *) + if ((phba->cfg_fof) && ((struct lpfc_device_data *) scsi_cmnd->device->hostdata)->oas_enabled) lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS; return 0; @@ -4314,6 +4314,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, fcp_cmnd->fcpCntl1 = SIMPLE_Q; sli4 = (phba->sli_rev == LPFC_SLI_REV4); + piocbq->iocb.un.fcpi.fcpi_XRdy = 0; /* * There are three possibilities here - use scatter-gather segment, use @@ -4782,7 +4783,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct lpfc_scsi_buf *lpfc_cmd; IOCB_t *cmd, *icmd; int ret = SUCCESS, status = 0; - unsigned long flags; + struct lpfc_sli_ring *pring_s4; + int ring_number, ret_val; + unsigned long flags, iflags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); status = fc_block_scsi_eh(cmnd); @@ -4833,6 +4836,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) BUG_ON(iocb->context1 != lpfc_cmd); + /* abort issued in recovery is still in progress */ + if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "3389 SCSI Layer I/O Abort Request is pending\n"); + spin_unlock_irqrestore(&phba->hbalock, flags); + goto wait_for_cmpl; + } + abtsiocb = __lpfc_sli_get_iocbq(phba); if (abtsiocb == NULL) { ret = FAILED; @@ -4871,11 +4882,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; + if (phba->sli_rev == LPFC_SLI_REV4) { + ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx; + pring_s4 = &phba->sli.ring[ring_number]; + /* Note: both hbalock and ring_lock must be set here */ + spin_lock_irqsave(&pring_s4->ring_lock, iflags); + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, + abtsiocb, 0); + spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); + } else { + ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, + abtsiocb, 0); + } /* no longer need the lock after this point */ spin_unlock_irqrestore(&phba->hbalock, flags); - if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == - IOCB_ERROR) { + + if (ret_val == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; @@ -4885,12 +4908,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) lpfc_sli_handle_fast_ring_event(phba, &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); +wait_for_cmpl: lpfc_cmd->waitq = &waitq; /* Wait for abort to complete */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); + + spin_lock_irqsave(shost->host_lock, flags); lpfc_cmd->waitq = NULL; + spin_unlock_irqrestore(shost->host_lock, flags); if (lpfc_cmd->pCmd == cmnd) { ret = FAILED; @@ -5172,8 +5199,9 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); if (cnt) - lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], - tgt_id, lun_id, context); + lpfc_sli_abort_taskmgmt(vport, + &phba->sli.ring[phba->sli.fcp_ring], + tgt_id, lun_id, context); later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; while (time_after(later, jiffies) && cnt) { schedule_timeout_uninterruptible(msecs_to_jiffies(20)); @@ -5491,7 +5519,7 @@ lpfc_slave_alloc(struct scsi_device *sdev) if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; - if (phba->cfg_EnableXLane) { + if (phba->cfg_fof) { /* * Check to see if the device data structure for the lun @@ -5616,7 +5644,7 @@ lpfc_slave_destroy(struct scsi_device *sdev) struct lpfc_device_data *device_data = 
sdev->hostdata; atomic_dec(&phba->sdev_cnt); - if ((phba->cfg_EnableXLane) && (device_data)) { + if ((phba->cfg_fof) && (device_data)) { spin_lock_irqsave(&phba->devicelock, flags); device_data->available = false; if (!device_data->oas_enabled) @@ -5655,7 +5683,7 @@ lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, int memory_flags; if (unlikely(!phba) || !vport_wwpn || !target_wwpn || - !(phba->cfg_EnableXLane)) + !(phba->cfg_fof)) return NULL; /* Attempt to create the device data to contain lun info */ @@ -5693,7 +5721,7 @@ lpfc_delete_device_data(struct lpfc_hba *phba, { if (unlikely(!phba) || !lun_info || - !(phba->cfg_EnableXLane)) + !(phba->cfg_fof)) return; if (!list_empty(&lun_info->listentry)) @@ -5727,7 +5755,7 @@ __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, struct lpfc_device_data *lun_info; if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || - !phba->cfg_EnableXLane) + !phba->cfg_fof) return NULL; /* Check to see if the lun is already enabled for OAS. */ @@ -5789,7 +5817,7 @@ lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, !starting_lun || !found_vport_wwpn || !found_target_wwpn || !found_lun || !found_lun_status || (*starting_lun == NO_MORE_OAS_LUN) || - !phba->cfg_EnableXLane) + !phba->cfg_fof) return false; lun = *starting_lun; @@ -5873,7 +5901,7 @@ lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, unsigned long flags; if (unlikely(!phba) || !vport_wwpn || !target_wwpn || - !phba->cfg_EnableXLane) + !phba->cfg_fof) return false; spin_lock_irqsave(&phba->devicelock, flags); @@ -5930,7 +5958,7 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, unsigned long flags; if (unlikely(!phba) || !vport_wwpn || !target_wwpn || - !phba->cfg_EnableXLane) + !phba->cfg_fof) return false; spin_lock_irqsave(&phba->devicelock, flags); diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index 0120bfccf50b..0389ac1e7b83 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6bb51f8e3c1b..32ada0505576 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -265,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q) return NULL; q->hba_index = idx; + + /* + * insert barrier for instruction interlock : data from the hardware + * must have the valid bit checked before it can be copied and acted + * upon. Given what was seen in lpfc_sli4_cq_get() of speculative + * instructions allowing action on content before valid bit checked, + * add barrier here as well. 
May not be needed as "content" is a + * single 32-bit entity here (vs multi word structure for cq's). + */ + mb(); return eqe; } @@ -370,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) cqe = q->qe[q->hba_index].cqe; q->hba_index = idx; + + /* + * insert barrier for instruction interlock : data from the hardware + * must have the valid bit checked before it can be copied and acted + * upon. Speculative instructions were allowing a bcopy at the start + * of lpfc_sli4_fp_handle_wcqe(), which is called immediately + * after our return, to copy data before the valid bit check above + * was done. As such, some of the copied data was stale. The barrier + * ensures the check is before any data is copied. + */ + mb(); return cqe; } @@ -3511,14 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) /* Error everything on txq and txcmplq * First do the txq. */ - spin_lock_irq(&phba->hbalock); - list_splice_init(&pring->txq, &completions); + if (phba->sli_rev >= LPFC_SLI_REV4) { + spin_lock_irq(&pring->ring_lock); + list_splice_init(&pring->txq, &completions); + pring->txq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) - lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_lock_irq(&phba->hbalock); + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_unlock_irq(&phba->hbalock); + } else { + spin_lock_irq(&phba->hbalock); + list_splice_init(&pring->txq, &completions); + pring->txq_cnt = 0; - spin_unlock_irq(&phba->hbalock); + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli_issue_abort_iotag(phba, pring, iocb); + spin_unlock_irq(&phba->hbalock); + } /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, @@ -3526,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) } /** + * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function aborts all iocbs in FCP rings and frees all the iocb + * objects in txq. This function issues an abort iocb for all the iocb commands + * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before + * the return of this function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + uint32_t i; + + /* Look on all the FCP Rings for the iotag */ + if (phba->sli_rev >= LPFC_SLI_REV4) { + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { + pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + lpfc_sli_abort_iocb_ring(phba, pring); + } + } else { + pring = &psli->ring[psli->fcp_ring]; + lpfc_sli_abort_iocb_ring(phba, pring); + } +} + + +/** * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring * @phba: Pointer to HBA context object. 
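lpfc_sli_abort_fcp_rings() above hides the SLI3/SLI4 ring-layout difference from its many callers: SLI4 parks its cfg_fcp_io_channel FCP rings after the fixed SLI3 rings, while SLI3 has the single psli->fcp_ring. The index mapping in miniature, with illustrative constant values:

#include <stdio.h>

/* Value is illustrative, not lpfc's actual constant. */
#define MAX_SLI3_CONFIGURED_RINGS 4

static void abort_fcp_rings(int sli_rev, int fcp_io_channel, int fcp_ring)
{
    if (sli_rev >= 4) {
        /* SLI4: one FCP ring per I/O channel, past the SLI3 rings */
        for (int i = 0; i < fcp_io_channel; i++)
            printf("abort ring %d\n", i + MAX_SLI3_CONFIGURED_RINGS);
    } else {
        /* SLI3: the single configured FCP ring */
        printf("abort ring %d\n", fcp_ring);
    }
}

int main(void)
{
    abort_fcp_rings(4, 4, 0);   /* SLI4 with four FCP channels */
    abort_fcp_rings(3, 0, 2);   /* SLI3 single-ring case */
    return 0;
}

The same revision test selects the lock inside lpfc_sli_abort_iocb_ring(): the per-ring ring_lock for SLI4, the global hbalock for SLI3, which is why the reworked function carries two nearly parallel branches.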
* @@ -3542,28 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) LIST_HEAD(txcmplq); struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; - - /* Currently, only one fcp ring */ - pring = &psli->ring[psli->fcp_ring]; + uint32_t i; spin_lock_irq(&phba->hbalock); - /* Retrieve everything on txq */ - list_splice_init(&pring->txq, &txq); - - /* Retrieve everything on the txcmplq */ - list_splice_init(&pring->txcmplq, &txcmplq); - /* Indicate the I/O queues are flushed */ phba->hba_flag |= HBA_FCP_IOQ_FLUSH; spin_unlock_irq(&phba->hbalock); - /* Flush the txq */ - lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, - IOERR_SLI_DOWN); + /* Look on all the FCP rings */ + if (phba->sli_rev >= LPFC_SLI_REV4) { + for (i = 0; i < phba->cfg_fcp_io_channel; i++) { + pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + + spin_lock_irq(&pring->ring_lock); + /* Retrieve everything on txq */ + list_splice_init(&pring->txq, &txq); + /* Retrieve everything on the txcmplq */ + list_splice_init(&pring->txcmplq, &txcmplq); + pring->txq_cnt = 0; + pring->txcmplq_cnt = 0; + spin_unlock_irq(&pring->ring_lock); + + /* Flush the txq */ + lpfc_sli_cancel_iocbs(phba, &txq, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + /* Flush the txcmplq */ + lpfc_sli_cancel_iocbs(phba, &txcmplq, + IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + } + } else { + pring = &psli->ring[psli->fcp_ring]; - /* Flush the txcmpq */ - lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, - IOERR_SLI_DOWN); + spin_lock_irq(&phba->hbalock); + /* Retrieve everything on txq */ + list_splice_init(&pring->txq, &txq); + /* Retrieve everything on the txcmplq */ + list_splice_init(&pring->txcmplq, &txcmplq); + pring->txq_cnt = 0; + pring->txcmplq_cnt = 0; + spin_unlock_irq(&phba->hbalock); + + /* Flush the txq */ + lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + /* Flush the txcmplq */ + lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, + IOERR_SLI_DOWN); + } } /** @@ -3966,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; uint16_t cfg_value; - int rc; + int rc = 0; /* Reset HBA */ lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0295 Reset HBA Data: x%x x%x\n", - phba->pport->port_state, psli->sli_flag); + "0295 Reset HBA Data: x%x x%x x%x\n", + phba->pport->port_state, psli->sli_flag, + phba->hba_flag); /* perform board reset */ phba->fc_eventTag = 0; @@ -3984,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba) phba->fcf.fcf_flag = 0; spin_unlock_irq(&phba->hbalock); + /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ + if (phba->hba_flag & HBA_FW_DUMP_OP) { + phba->hba_flag &= ~HBA_FW_DUMP_OP; + return rc; + } + /* Now physically reset the device */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0389 Performing PCI function reset!\n"); @@ -4981,7 +5079,7 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) } while (++fcp_eqidx < phba->cfg_fcp_io_channel); } - if (phba->cfg_EnableXLane) + if (phba->cfg_fof) lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); if (phba->sli4_hba.hba_eq) { @@ -6701,7 +6799,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; MAILBOX_t *mb = &pmbox->u.mb; struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; /* If the mailbox completed, process the completion and return */ if (lpfc_sli4_process_missed_mbox_completions(phba)) @@ -6743,8 +6840,7 @@ psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock); - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + lpfc_sli_abort_fcp_rings(phba); lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0345 Resetting board due to mailbox timeout\n"); @@ -8112,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, abort_tag = (uint32_t) iocbq->iotag; xritag = iocbq->sli4_xritag; wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ + wqe->generic.wqe_com.word10 = 0; /* words0-2 bpl convert bde */ if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / @@ -8618,8 +8715,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if ((piocb->iocb_flag & LPFC_IO_FCP) || (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & - LPFC_IO_OAS))) { + if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; } else { wq = phba->sli4_hba.oas_wq; @@ -8714,7 +8810,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, if (phba->sli_rev == LPFC_SLI_REV4) { if (piocb->iocb_flag & LPFC_IO_FCP) { - if (!phba->cfg_EnableXLane || (!(piocb->iocb_flag & + if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { if (unlikely(!phba->sli4_hba.fcp_wq)) return IOCB_ERROR; @@ -9149,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) pring->sli.sli3.next_cmdidx = 0; pring->sli.sli3.local_getidx = 0; pring->sli.sli3.cmdidx = 0; + pring->flag = 0; INIT_LIST_HEAD(&pring->txq); INIT_LIST_HEAD(&pring->txcmplq); INIT_LIST_HEAD(&pring->iocb_continueq); @@ -9784,43 +9881,6 @@ abort_iotag_exit: } /** - * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * - * This function aborts all iocbs in the given ring and frees all the iocb - * objects in txq. This function issues abort iocbs unconditionally for all - * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed - * to complete before the return of this function. The caller is not required - * to hold any locks. - **/ -static void -lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) -{ - LIST_HEAD(completions); - struct lpfc_iocbq *iocb, *next_iocb; - - if (pring->ringno == LPFC_ELS_RING) - lpfc_fabric_abort_hba(phba); - - spin_lock_irq(&phba->hbalock); - - /* Take off all the iocbs on txq for cancelling */ - list_splice_init(&pring->txq, &completions); - pring->txq_cnt = 0; - - /* Next issue ABTS for everything on the txcmplq */ - list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) - lpfc_sli_abort_iotag_issue(phba, pring, iocb); - - spin_unlock_irq(&phba->hbalock); - - /* Cancel all the IOCBs from the completions list */ - lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, - IOERR_SLI_ABORTED); -} - -/** * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. * @phba: pointer to lpfc HBA data structure. * @@ -9835,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; - lpfc_sli_iocb_ring_abort(phba, pring); + lpfc_sli_abort_iocb_ring(phba, pring); } } @@ -10060,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, } /** + * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN + * @vport: Pointer to virtual port. 
+ * @pring: Pointer to driver SLI ring object. + * @tgt_id: SCSI ID of the target. + * @lun_id: LUN ID of the scsi device. + * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. + * + * This function sends an abort command for every SCSI command + * associated with the given virtual port pending on the ring + * filtered by the lpfc_sli_validate_fcp_iocb function. + * When cmd == LPFC_CTX_LUN, the function sends an abort only to the + * FCP iocbs associated with the LUN specified by the tgt_id and lun_id + * parameters. + * When cmd == LPFC_CTX_TGT, the function sends an abort only to the + * FCP iocbs associated with the SCSI target specified by the tgt_id parameter. + * When cmd == LPFC_CTX_HOST, the function sends an abort to all + * FCP iocbs associated with the virtual port. + * This function returns the number of iocbs it aborted. + * This function is called with no locks held right after a taskmgmt + * command is sent. + **/ +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, + uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *abtsiocbq; + struct lpfc_iocbq *iocbq; + IOCB_t *icmd; + int sum, i, ret_val; + unsigned long iflags; + struct lpfc_sli_ring *pring_s4; + uint32_t ring_number; + + spin_lock_irq(&phba->hbalock); + + /* all I/Os are in the process of being flushed */ + if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { + spin_unlock_irq(&phba->hbalock); + return 0; + } + sum = 0; + + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, + cmd) != 0) + continue; + + /* + * If the iocbq is already being aborted, don't take a second + * action; just skip it. + */ + if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) + continue; + + /* issue ABTS for this IOCB based on iotag */ + abtsiocbq = __lpfc_sli_get_iocbq(phba); + if (abtsiocbq == NULL) + continue; + + icmd = &iocbq->iocb; + abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; + abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; + if (phba->sli_rev == LPFC_SLI_REV4) + abtsiocbq->iocb.un.acxri.abortIoTag = + iocbq->sli4_xritag; + else + abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; + abtsiocbq->iocb.ulpLe = 1; + abtsiocbq->iocb.ulpClass = icmd->ulpClass; + abtsiocbq->vport = vport; + + /* ABTS WQE must go to the same WQ as the WQE to be aborted */ + abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; + if (iocbq->iocb_flag & LPFC_IO_FCP) + abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; + + if (lpfc_is_link_up(phba)) + abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; + else + abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; + + /* Setup callback routine and issue the command. */ + abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; + + /* + * Indicate the IO is being aborted by the driver and set + * the abort flag in the IO being aborted.
+ */ + iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; + + if (phba->sli_rev == LPFC_SLI_REV4) { + ring_number = MAX_SLI3_CONFIGURED_RINGS + + iocbq->fcp_wqidx; + pring_s4 = &phba->sli.ring[ring_number]; + /* Note: both hbalock and ring_lock must be held here */ + spin_lock_irqsave(&pring_s4->ring_lock, iflags); + ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, + abtsiocbq, 0); + spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); + } else { + ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocbq, 0); + } + + + if (ret_val == IOCB_ERROR) + __lpfc_sli_release_iocbq(phba, abtsiocbq); + else + sum++; + } + spin_unlock_irq(&phba->hbalock); + return sum; +} + +/** * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler * @phba: Pointer to HBA context object. * @cmdiocbq: Pointer to command iocb. diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 6f04080f4ea8..edb48832c39b 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 9b8cda866176..7f50aa04d66a 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009-2013 Emulex. All rights reserved. * + * Copyright (C) 2009-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index e32cbec70324..41675c1193e7 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2013 Emulex. All rights reserved. * + * Copyright (C) 2004-2014 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.45" +#define LPFC_DRIVER_VERSION "10.2.8001.0." #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ @@ -30,4 +30,4 @@ #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2013 Emulex. All rights reserved." +#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex. All rights reserved."
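The mb() barriers added to lpfc_sli4_eq_get() and lpfc_sli4_cq_get() above follow a common consume-after-valid-bit idiom for DMA rings: the hardware writes the entry payload first and sets the valid bit last, so the consumer must test the valid bit and then fence before reading the rest of the entry; otherwise speculative loads can observe stale payload bytes, as the lpfc comments describe for lpfc_sli4_fp_handle_wcqe(). Below is a minimal user-space sketch of that ordering; all names in it (struct ring_entry, RING_ENTRY_VALID, ring_consume()) are illustrative rather than lpfc symbols, and __sync_synchronize() stands in for the kernel's mb().

#include <stdbool.h>
#include <string.h>

/* Stand-in for the kernel's full memory barrier, mb(). */
#define full_mb() __sync_synchronize()

/* Hypothetical ring entry: hardware writes payload first, then sets
 * bit 0 of flags to publish the entry. */
struct ring_entry {
	unsigned int flags;
	unsigned char payload[60];
};
#define RING_ENTRY_VALID 0x1u

/* Copy out a published entry, or report that none is ready yet. */
static bool ring_consume(volatile struct ring_entry *slot,
			 struct ring_entry *out)
{
	if (!(slot->flags & RING_ENTRY_VALID))
		return false;	/* hardware has not published this slot */
	/*
	 * Fence between the valid-bit check and the copy so that no
	 * payload load is speculated ahead of the check; without it
	 * the copy can capture stale bytes, which is the failure the
	 * lpfc_sli4_cq_get() comment above describes.
	 */
	full_mb();
	memcpy(out, (const struct ring_entry *)slot, sizeof(*out));
	return true;
}

The EQ-side barrier is the same pattern applied defensively; as the eq_get() comment notes, an EQE is a single 32-bit word, so the window for a stale read is narrower there than for the multi-word CQE.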
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index f5cdc68cd5b6..6a039eb1cbce 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -25,10 +25,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: mac_NCR5380.c,v $ - */ - #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> @@ -58,12 +54,6 @@ #include "NCR5380.h" -#if 0 -#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION) -#else -#define NDEBUG (NDEBUG_ABORT) -#endif - #define RESET_BOOT #define DRIVER_SETUP diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h index 7dc62fce1c4c..06969b06e54b 100644 --- a/drivers/scsi/mac_scsi.h +++ b/drivers/scsi/mac_scsi.h @@ -22,10 +22,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: cumana_NCR5380.h,v $ - */ - #ifndef MAC_NCR5380_H #define MAC_NCR5380_H @@ -51,8 +47,6 @@ #include <scsi/scsicam.h> -#ifndef HOSTS_C - #define NCR5380_implementation_fields \ int port, ctrl @@ -75,10 +69,6 @@ #define NCR5380_show_info macscsi_show_info #define NCR5380_write_info macscsi_write_info -#define BOARD_NORMAL 0 -#define BOARD_NCR53C400 1 - -#endif /* ndef HOSTS_C */ #endif /* ndef ASM */ #endif /* MAC_NCR5380_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index d84d02c2aad9..112799b131a9 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -3061,7 +3061,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) u32 cur_state; u32 abs_state, curr_abs_state; - fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; + abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); + fw_state = abs_state & MFI_STATE_MASK; if (fw_state != MFI_STATE_READY) printk(KERN_INFO "megasas: Waiting for FW to come to ready" @@ -3069,9 +3070,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) while (fw_state != MFI_STATE_READY) { - abs_state = - instance->instancet->read_fw_status_reg(instance->reg_set); - switch (fw_state) { case MFI_STATE_FAULT: @@ -3223,10 +3221,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) * The cur_state should not last for more than max_wait secs */ for (i = 0; i < (max_wait * 1000); i++) { - fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & - MFI_STATE_MASK ; - curr_abs_state = - instance->instancet->read_fw_status_reg(instance->reg_set); + curr_abs_state = instance->instancet-> + read_fw_status_reg(instance->reg_set); if (abs_state == curr_abs_state) { msleep(1); @@ -3242,6 +3238,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr) "in %d secs\n", fw_state, max_wait); return -ENODEV; } + + abs_state = curr_abs_state; + fw_state = curr_abs_state & MFI_STATE_MASK; } printk(KERN_INFO "megasas: FW now in Ready state\n"); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index bde63f7452bd..8b88118e20e6 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1739,14 +1739,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) list_for_each_entry_safe(chain_req, next, &ioc->scsi_lookup[i].chain_list, tracker_list) { list_del_init(&chain_req->tracker_list); - list_add_tail(&chain_req->tracker_list, + list_add(&chain_req->tracker_list, &ioc->free_chain_list); } } ioc->scsi_lookup[i].cb_idx = 0xFF; ioc->scsi_lookup[i].scmd = NULL; 
ioc->scsi_lookup[i].direct_io = 0; - list_add_tail(&ioc->scsi_lookup[i].tracker_list, + list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); @@ -1764,13 +1764,13 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) /* hi-priority */ i = smid - ioc->hi_priority_smid; ioc->hpr_lookup[i].cb_idx = 0xFF; - list_add_tail(&ioc->hpr_lookup[i].tracker_list, + list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); } else if (smid <= ioc->hba_queue_depth) { /* internal queue */ i = smid - ioc->internal_smid; ioc->internal_lookup[i].cb_idx = 0xFF; - list_add_tail(&ioc->internal_lookup[i].tracker_list, + list_add(&ioc->internal_lookup[i].tracker_list, &ioc->internal_free_list); } spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 1f2ac3a28621..fd3b998c75b1 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -1065,7 +1065,7 @@ void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply); int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, - ulong timeout, unsigned long serial_number, enum mutex_type m_type); + ulong timeout, enum mutex_type m_type); void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle); void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index b7f887c9b0bf..62df8f9d4271 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -987,7 +987,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg, mpt2sas_scsih_issue_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, - 0, TM_MUTEX_ON); + TM_MUTEX_ON); ioc->tm_cmds.status = MPT2_CMD_NOT_USED; } else mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 7f0af4fcc001..5055f925d2cd 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2368,7 +2368,6 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle) * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) * @smid_task: smid assigned to the task * @timeout: timeout in seconds - * @serial_number: the serial_number from scmd * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF * Context: user * @@ -2381,7 +2380,7 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle) int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout, - unsigned long serial_number, enum mutex_type m_type) + enum mutex_type m_type) { Mpi2SCSITaskManagementRequest_t *mpi_request; Mpi2SCSITaskManagementReply_t *mpi_reply; @@ -2634,8 +2633,7 @@ _scsih_abort(struct scsi_cmnd *scmd) handle = sas_device_priv_data->sas_target->handle; r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd->serial_number, TM_MUTEX_ON); + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "task abort: %s 
scmd(%p)\n", @@ -2696,8 +2694,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, - TM_MUTEX_ON); + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", @@ -2757,7 +2754,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, - 30, 0, TM_MUTEX_ON); + 30, TM_MUTEX_ON); out: starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", @@ -3953,9 +3950,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int -_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) { - struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); struct MPT2SAS_DEVICE *sas_device_priv_data; struct MPT2SAS_TARGET *sas_target_priv_data; struct _raid_device *raid_device; @@ -3963,7 +3960,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) u32 mpi_control; u16 smid; - scmd->scsi_done = done; sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -4039,7 +4035,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) MPT_TARGET_FLAGS_RAID_COMPONENT) mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; else - mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; mpi_request->DevHandle = cpu_to_le16(sas_device_priv_data->sas_target->handle); mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); @@ -4083,8 +4079,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) return SCSI_MLQUEUE_HOST_BUSY; } -static DEF_SCSI_QCMD(_scsih_qcmd) - /** * _scsih_normalize_sense - normalize descriptor and fixed format sense data * @sense_buffer: sense data returned by target @@ -5880,7 +5874,7 @@ broadcast_aen_retry: spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, - MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, @@ -5922,7 +5916,7 @@ broadcast_aen_retry: r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd->serial_number, TM_MUTEX_OFF); + TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : " @@ -8293,7 +8287,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) mpt2sas_base_free_resources(ioc); pci_save_state(pdev); - pci_disable_device(pdev); pci_set_power_state(pdev, device_state); return 0; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 0ebf5d913c80..9b90a6fef706 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -993,7 +993,7 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, 
uint lun, u8 type, u16 smid_task, - ulong timeout, unsigned long serial_number, enum mutex_type m_type); + ulong timeout, enum mutex_type m_type); void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 9b89de14a0a3..ba9cbe598a91 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -980,7 +980,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, mpt3sas_scsih_issue_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, - 0, TM_MUTEX_ON); + TM_MUTEX_ON); } else mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, FORCE_BIG_HAMMER); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index a961fe11b527..18e713db1d32 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2029,7 +2029,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) * @smid_task: smid assigned to the task * @timeout: timeout in seconds - * @serial_number: the serial_number from scmd * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF * Context: user * @@ -2042,7 +2041,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout, - unsigned long serial_number, enum mutex_type m_type) + enum mutex_type m_type) { Mpi2SCSITaskManagementRequest_t *mpi_request; Mpi2SCSITaskManagementReply_t *mpi_reply; @@ -2293,8 +2292,7 @@ _scsih_abort(struct scsi_cmnd *scmd) handle = sas_device_priv_data->sas_target->handle; r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd->serial_number, TM_MUTEX_ON); + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", @@ -2353,8 +2351,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd) r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, scmd->device->lun, - MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, - TM_MUTEX_ON); + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON); out: sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", @@ -2414,7 +2411,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, - 30, 0, TM_MUTEX_ON); + 30, TM_MUTEX_ON); out: starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", @@ -3518,7 +3515,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) /** - * _scsih_qcmd_lck - main scsi request entry point + * _scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object * @done: function pointer to be invoked on completion * @@ -3529,9 +3526,9 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int -_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd(struct Scsi_Host *shost, 
struct scsi_cmnd *scmd) { - struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); struct MPT3SAS_DEVICE *sas_device_priv_data; struct MPT3SAS_TARGET *sas_target_priv_data; Mpi2SCSIIORequest_t *mpi_request; @@ -3544,7 +3541,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) scsi_print_command(scmd); #endif - scmd->scsi_done = done; sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -3659,8 +3655,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) out: return SCSI_MLQUEUE_HOST_BUSY; } -static DEF_SCSI_QCMD(_scsih_qcmd) - /** * _scsih_normalize_sense - normalize descriptor and fixed format sense data @@ -5425,7 +5419,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, - MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, @@ -5467,7 +5461,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, - scmd->serial_number, TM_MUTEX_OFF); + TM_MUTEX_OFF); if (r == FAILED) { sdev_printk(KERN_WARNING, sdev, "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 1e4479f3331a..9270d15ff1a4 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -564,7 +564,7 @@ static void mvs_94xx_interrupt_enable(struct mvs_info *mvi) u32 tmp; tmp = mr32(MVS_GBL_CTL); - tmp |= (IRQ_SAS_A | IRQ_SAS_B); + tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B); mw32(MVS_GBL_INT_STAT, tmp); writel(tmp, regs + 0x0C); writel(tmp, regs + 0x10); @@ -580,7 +580,7 @@ static void mvs_94xx_interrupt_disable(struct mvs_info *mvi) tmp = mr32(MVS_GBL_CTL); - tmp &= ~(IRQ_SAS_A | IRQ_SAS_B); + tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B); mw32(MVS_GBL_INT_STAT, tmp); writel(tmp, regs + 0x0C); writel(tmp, regs + 0x10); @@ -596,7 +596,7 @@ static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq) if (!(mvi->flags & MVF_FLAG_SOC)) { stat = mr32(MVS_GBL_INT_STAT); - if (!(stat & (IRQ_SAS_A | IRQ_SAS_B))) + if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B))) return 0; } return stat; @@ -606,8 +606,8 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) { void __iomem *regs = mvi->regs; - if (((stat & IRQ_SAS_A) && mvi->id == 0) || - ((stat & IRQ_SAS_B) && mvi->id == 1)) { + if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) || + ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) { mw32_f(MVS_INT_STAT, CINT_DONE); spin_lock(&mvi->lock); diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h index 487aa6f97412..14e197497b46 100644 --- a/drivers/scsi/mvsas/mv_94xx.h +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -150,35 +150,35 @@ enum chip_register_bits { enum pci_interrupt_cause { /* MAIN_IRQ_CAUSE (R10200) Bits*/ - IRQ_COM_IN_I2O_IOP0 = (1 << 0), - IRQ_COM_IN_I2O_IOP1 = (1 << 1), - IRQ_COM_IN_I2O_IOP2 = (1 << 2), - IRQ_COM_IN_I2O_IOP3 = (1 << 3), - IRQ_COM_OUT_I2O_HOS0 = (1 << 4), - IRQ_COM_OUT_I2O_HOS1 = (1 << 5), - IRQ_COM_OUT_I2O_HOS2 = (1 << 6), - IRQ_COM_OUT_I2O_HOS3 = (1 << 7), - IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), - IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), - 
IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), - IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), - IRQ_PCIF_DRBL0 = (1 << 12), - IRQ_PCIF_DRBL1 = (1 << 13), - IRQ_PCIF_DRBL2 = (1 << 14), - IRQ_PCIF_DRBL3 = (1 << 15), - IRQ_XOR_A = (1 << 16), - IRQ_XOR_B = (1 << 17), - IRQ_SAS_A = (1 << 18), - IRQ_SAS_B = (1 << 19), - IRQ_CPU_CNTRL = (1 << 20), - IRQ_GPIO = (1 << 21), - IRQ_UART = (1 << 22), - IRQ_SPI = (1 << 23), - IRQ_I2C = (1 << 24), - IRQ_SGPIO = (1 << 25), - IRQ_COM_ERR = (1 << 29), - IRQ_I2O_ERR = (1 << 30), - IRQ_PCIE_ERR = (1 << 31), + MVS_IRQ_COM_IN_I2O_IOP0 = (1 << 0), + MVS_IRQ_COM_IN_I2O_IOP1 = (1 << 1), + MVS_IRQ_COM_IN_I2O_IOP2 = (1 << 2), + MVS_IRQ_COM_IN_I2O_IOP3 = (1 << 3), + MVS_IRQ_COM_OUT_I2O_HOS0 = (1 << 4), + MVS_IRQ_COM_OUT_I2O_HOS1 = (1 << 5), + MVS_IRQ_COM_OUT_I2O_HOS2 = (1 << 6), + MVS_IRQ_COM_OUT_I2O_HOS3 = (1 << 7), + MVS_IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), + MVS_IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), + MVS_IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), + MVS_IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), + MVS_IRQ_PCIF_DRBL0 = (1 << 12), + MVS_IRQ_PCIF_DRBL1 = (1 << 13), + MVS_IRQ_PCIF_DRBL2 = (1 << 14), + MVS_IRQ_PCIF_DRBL3 = (1 << 15), + MVS_IRQ_XOR_A = (1 << 16), + MVS_IRQ_XOR_B = (1 << 17), + MVS_IRQ_SAS_A = (1 << 18), + MVS_IRQ_SAS_B = (1 << 19), + MVS_IRQ_CPU_CNTRL = (1 << 20), + MVS_IRQ_GPIO = (1 << 21), + MVS_IRQ_UART = (1 << 22), + MVS_IRQ_SPI = (1 << 23), + MVS_IRQ_I2C = (1 << 24), + MVS_IRQ_SGPIO = (1 << 25), + MVS_IRQ_COM_ERR = (1 << 29), + MVS_IRQ_I2O_ERR = (1 << 30), + MVS_IRQ_PCIE_ERR = (1 << 31), }; union reg_phy_cfg { diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 5ff978be249d..eacee48a955c 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -728,6 +728,15 @@ static struct pci_device_id mvs_pci_table[] = { .class_mask = 0, .driver_data = chip_9485, }, + { + .vendor = PCI_VENDOR_ID_MARVELL_EXT, + .device = 0x9485, + .subvendor = PCI_ANY_ID, + .subdevice = 0x9485, + .class = 0, + .class_mask = 0, + .driver_data = chip_9485, + }, { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */ { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index bac04c2335aa..5f4cbf0c4759 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1570,6 +1570,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write, if (unlikely(!req)) return ERR_PTR(-ENOMEM); + blk_rq_set_block_pc(req); return req; } } @@ -1590,7 +1591,6 @@ static int _init_blk_request(struct osd_request *or, } or->request = req; - req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_QUIET; req->timeout = or->timeout; @@ -1608,7 +1608,7 @@ static int _init_blk_request(struct osd_request *or, ret = PTR_ERR(req); goto out; } - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); or->in.req = or->request->next_rq = req; } } else if (has_in) diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 21883a2d6324..0727ea7cc387 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -365,7 +365,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd, if (!req) return DRIVER_ERROR << 24; - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); req->cmd_flags |= REQ_QUIET; SRpnt->bio = NULL; diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index 3721342835e9..aa528f53c533 
100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h @@ -129,8 +129,6 @@ static int pas16_bus_reset(Scsi_Cmnd *); #define CAN_QUEUE 32 #endif -#ifndef HOSTS_C - #define NCR5380_implementation_fields \ volatile unsigned short io_port @@ -171,6 +169,5 @@ static int pas16_bus_reset(Scsi_Cmnd *); #define PAS16_IRQS 0xd4a8 -#endif /* else def HOSTS_C */ #endif /* ndef ASM */ #endif /* PAS16_H */ diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 28b4e8139153..a368d77b8d41 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -395,6 +395,8 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, payload.offset = 0; payload.length = 4096; payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; @@ -402,6 +404,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, bios_index++) str += sprintf(str, "%c", *((u8 *)((u8 *)virt_addr+bios_index))); + kfree(payload.func_specific); return str - buf; } static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); @@ -729,7 +732,7 @@ static ssize_t pm8001_show_update_fw(struct device *cdev, flash_error_table[i].reason); } -static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO, +static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP, pm8001_show_update_fw, pm8001_store_update_fw); struct device_attribute *pm8001_host_attrs[] = { &dev_attr_interface_rev, diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index c4f31b21feb8..e90c89f1d480 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -677,7 +677,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) * pm8001_get_phy_settings_info : Read phy setting values. * @pm8001_ha : our hba. */ -void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) +static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) { #ifdef PM8001_READ_VPD @@ -691,11 +691,15 @@ void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) payload.offset = 0; payload.length = 4096; payload.func_specific = kzalloc(4096, GFP_KERNEL); + if (!payload.func_specific) + return -ENOMEM; /* Read phy setting values from flash */ PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); + kfree(payload.func_specific); #endif + return 0; } #ifdef PM8001_USE_MSIX @@ -879,8 +883,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev, pm8001_init_sas_add(pm8001_ha); /* phy setting support for motherboard controller */ if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 && - pdev->subsystem_vendor != 0) - pm8001_get_phy_settings_info(pm8001_ha); + pdev->subsystem_vendor != 0) { + rc = pm8001_get_phy_settings_info(pm8001_ha); + if (rc) + goto err_out_shost; + } pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); if (rc) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 07befcf365b8..16fe5196e6d9 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -664,7 +664,7 @@ do_read: } rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, - addr, offset, SFP_BLOCK_SIZE, 0); + addr, offset, SFP_BLOCK_SIZE, BIT_1); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x706d, "Unable to read SFP data (%x/%x/%x).\n", rval, @@ -1495,7 +1495,7 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr, if (!ha->fw_dumped) size = 0; - else if (IS_QLA82XX(ha)) + else if (IS_P3P_TYPE(ha)) size = ha->md_template_size + ha->md_dump_size; else size = ha->fw_dump_len; diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 71ff340f6de4..524f9eb7fcd1 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2012 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -2054,9 +2054,49 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job) bsg_job->reply->reply_payload_rcv_len = sizeof(sr); break; default: - ql_log(ql_log_warn, vha, 0x708c, + ql_dbg(ql_dbg_user, vha, 0x708c, "Unknown serdes cmd %x.\n", sr.cmd); - rval = -EDOM; + rval = -EINVAL; + break; + } + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + rval ? EXT_STATUS_MAILBOX : 0; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + return 0; +} + +static int +qla8044_serdes_op(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + int rval = 0; + struct qla_serdes_reg_ex sr; + + memset(&sr, 0, sizeof(sr)); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); + + switch (sr.cmd) { + case INT_SC_SERDES_WRITE_REG: + rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); + bsg_job->reply->reply_payload_rcv_len = 0; + break; + case INT_SC_SERDES_READ_REG: + rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); + bsg_job->reply->reply_payload_rcv_len = sizeof(sr); + break; + default: + ql_dbg(ql_dbg_user, vha, 0x70cf, + "Unknown serdes cmd %x.\n", sr.cmd); + rval = -EINVAL; break; } @@ -2121,6 +2161,9 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) case QL_VND_SERDES_OP: return qla26xx_serdes_op(bsg_job); + case QL_VND_SERDES_OP_EX: + return qla8044_serdes_op(bsg_job); + default: return -ENOSYS; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index e5c2126221e9..d38f9efa56fa 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -24,6 +24,7 @@ #define QL_VND_READ_I2C 0x11 #define QL_VND_FX00_MGMT_CMD 0x12 #define QL_VND_SERDES_OP 0x13 +#define QL_VND_SERDES_OP_EX 0x14 /* BSG Vendor specific subcode returns */ #define EXT_STATUS_OK 0 @@ -225,4 +226,10 @@ struct qla_serdes_reg { uint16_t val; } __packed; +struct qla_serdes_reg_ex { + uint16_t cmd; + uint32_t addr; + uint32_t val; +} __packed; + #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 97255f7c3975..c72ee97bf3f7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -15,7 +15,7 @@ * | | | 0x0144,0x0146 | * | | | 0x015b-0x0160 | * | | | 0x016e-0x0170 | - * | Mailbox commands | 0x1187 | 0x1018-0x1019 | + * | Mailbox commands | 0x118d | 0x1018-0x1019 | * | | | 0x10ca | * | | | 0x1115-0x1116 | * | | | 0x111a-0x111b | @@ -45,12 +45,16 @@ * | | | 0x70ad-0x70ae | * | | | 0x70d7-0x70db | * | | | 0x70de-0x70df | - * | Task Management | 0x803d | 0x8025-0x8026 | - * | | | 0x800b,0x8039 | + * | Task Management | 0x803d | 0x8000,0x800b | + * | | | 0x8019 | + * | | | 0x8025,0x8026 | + * | | | 0x8031,0x8032 | + * | | | 0x8039,0x803c | * | AER/EEH | 0x9011 | | * | Virtual Port | 0xa007 | | - * | ISP82XX Specific | 0xb14c | 0xb002,0xb024 | + * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 | * | | | 0xb09e,0xb0ae | + * | | | 0xb0c3,0xb0c6 | * | | | 0xb0e0-0xb0ef | * | | | 0xb085,0xb0dc | * | | | 0xb107,0xb108 | @@ -60,12 +64,12 @@ * | | | 0xb13c-0xb140 | * | | | 0xb149 | * | MultiQ | 0xc00c | | - * | Misc | 0xd2ff | 0xd017-0xd019 | + * | Misc | 0xd212 | 0xd017-0xd019 | * | | | 0xd020 | - * | | | 0xd02e-0xd0ff | + * | | | 0xd030-0xd0ff | * | | | 0xd101-0xd1fe | - * | | | 0xd212-0xd2fe | - * | Target Mode | 0xe070 | 0xe021 | + * | | | 0xd213-0xd2fe | + * | Target Mode | 0xe078 | | * | Target Mode Management | 0xf072 | 0xf002-0xf003 | * | | | 0xf046-0xf049 | * | Target Mode Task Management | 0x1000b | | @@ -277,9 +281,15 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram, if (rval != QLA_SUCCESS) return rval; + set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags); + /* External Memory. */ - return qla24xx_dump_ram(ha, 0x100000, *nxt, + rval = qla24xx_dump_ram(ha, 0x100000, *nxt, ha->fw_memory_size - 0x100000 + 1, nxt); + if (rval == QLA_SUCCESS) + set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags); + + return rval; } static uint32_t * @@ -296,23 +306,15 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, return buf; } -int -qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) +void +qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha) { - int rval = QLA_SUCCESS; - uint32_t cnt; - WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); - for (cnt = 30000; - ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) && - rval == QLA_SUCCESS; cnt--) { - if (cnt) - udelay(100); - else - rval = QLA_FUNCTION_TIMEOUT; - } - return rval; + /* A 100 usec delay is sufficient for the hardware to pause the RISC */ + udelay(100); + if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) + set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags); } int @@ -320,10 +322,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha) { int rval = QLA_SUCCESS; uint32_t cnt; - uint16_t mb0, wd; + uint16_t wd; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - /* Reset RISC.
*/ + /* + * Reset RISC. The delay is dependent on system architecture. + * The driver can proceed with the reset sequence after waiting + * for a timeout period. + */ WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); for (cnt = 0; cnt < 30000; cnt++) { if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0) @@ -331,19 +337,14 @@ udelay(10); } + if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE)) + set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); WRT_REG_DWORD(&reg->ctrl_status, CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); udelay(100); - /* Wait for firmware to complete NVRAM accesses. */ - mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0); - for (cnt = 10000 ; cnt && mb0; cnt--) { - udelay(5); - mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0); - barrier(); - } /* Wait for soft-reset to complete. */ for (cnt = 0; cnt < 30000; cnt++) { @@ -353,16 +354,21 @@ udelay(10); } + if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET)) + set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags); + WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET); RD_REG_DWORD(&reg->hccr); /* PCI Posting. */ - for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 && + for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) - udelay(100); + udelay(10); else rval = QLA_FUNCTION_TIMEOUT; } + if (rval == QLA_SUCCESS) + set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); return rval; } @@ -659,12 +665,13 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xd000, - "Failed to dump firmware (%x).\n", rval); + "Failed to dump firmware (%x), dump status flags (0x%lx).\n", + rval, ha->fw_dump_cap_flags); ha->fw_dumped = 0; } else { ql_log(ql_log_info, vha, 0xd001, - "Firmware dump saved to temp buffer (%ld/%p).\n", - vha->host_no, ha->fw_dump); + "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", + vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } @@ -1053,6 +1060,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) risc_address = ext_mem_cnt = 0; flags = 0; + ha->fw_dump_cap_flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1075,10 +1083,11 @@ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); - /* Pause RISC. */ - rval = qla24xx_pause_risc(reg); - if (rval != QLA_SUCCESS) - goto qla24xx_fw_dump_failed_0; + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach in case of a pause timeout. + */ + qla24xx_pause_risc(reg, ha); /* Host interface registers. */ dmp_reg = &reg->flash_addr; @@ -1302,6 +1311,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) risc_address = ext_mem_cnt = 0; flags = 0; + ha->fw_dump_cap_flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1325,10 +1335,11 @@ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); - /* Pause RISC. */ - rval = qla24xx_pause_risc(reg); - if (rval != QLA_SUCCESS) - goto qla25xx_fw_dump_failed_0; + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach in case of a pause timeout. + */ + qla24xx_pause_risc(reg, ha); /* Host/Risc registers.
*/ iter_reg = fw->host_risc_reg; @@ -1619,6 +1630,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) risc_address = ext_mem_cnt = 0; flags = 0; + ha->fw_dump_cap_flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1641,10 +1653,11 @@ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); - /* Pause RISC. */ - rval = qla24xx_pause_risc(reg); - if (rval != QLA_SUCCESS) - goto qla81xx_fw_dump_failed_0; + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach in case of a pause timeout. + */ + qla24xx_pause_risc(reg, ha); /* Host/Risc registers. */ iter_reg = fw->host_risc_reg; @@ -1938,6 +1951,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) risc_address = ext_mem_cnt = 0; flags = 0; + ha->fw_dump_cap_flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1959,10 +1973,11 @@ fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); - /* Pause RISC. */ - rval = qla24xx_pause_risc(reg); - if (rval != QLA_SUCCESS) - goto qla83xx_fw_dump_failed_0; + /* + * Pause RISC. No need to track timeout, as resetting the chip + * is the right approach in case of a pause timeout. + */ + qla24xx_pause_risc(reg, ha); WRT_REG_DWORD(&reg->iobase_addr, 0x6000); dmp_reg = &reg->iobase_window; @@ -2385,9 +2400,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) nxt += sizeof(fw->code_ram); nxt += (ha->fw_memory_size - 0x100000 + 1); goto copy_queue; - } else + } else { + set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); ql_log(ql_log_warn, vha, 0xd010, "bigger hammer success?\n"); + } } rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index cc961040f8b1..e1fc4e66966a 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -353,5 +353,6 @@ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, uint32_t, void **); extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, uint32_t, void **); -extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *); +extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, + struct qla_hw_data *); extern int qla24xx_soft_reset(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 6a106136716c..de5d0ae19d83 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -965,6 +965,13 @@ struct mbx_cmd_32 { */ #define MBC_WRITE_MPI_REGISTER 0x01 /* Write MPI Register.
*/ +/* + * ISP8044 mailbox commands + */ +#define MBC_SET_GET_ETH_SERDES_REG 0x150 +#define HCS_WRITE_SERDES 0x3 +#define HCS_READ_SERDES 0x4 + /* Firmware return data sizes */ #define FCAL_MAP_SIZE 128 @@ -1622,25 +1629,35 @@ typedef struct { #define PO_MODE_DIF_PASS 2 #define PO_MODE_DIF_REPLACE 3 #define PO_MODE_DIF_TCP_CKSUM 6 -#define PO_ENABLE_DIF_BUNDLING BIT_8 #define PO_ENABLE_INCR_GUARD_SEED BIT_3 -#define PO_DISABLE_INCR_REF_TAG BIT_5 #define PO_DISABLE_GUARD_CHECK BIT_4 +#define PO_DISABLE_INCR_REF_TAG BIT_5 +#define PO_DIS_HEADER_MODE BIT_7 +#define PO_ENABLE_DIF_BUNDLING BIT_8 +#define PO_DIS_FRAME_MODE BIT_9 +#define PO_DIS_VALD_APP_ESC BIT_10 /* Dis validation for escape tag/ffffh */ +#define PO_DIS_VALD_APP_REF_ESC BIT_11 + +#define PO_DIS_APP_TAG_REPL BIT_12 /* disable REG Tag replacement */ +#define PO_DIS_REF_TAG_REPL BIT_13 +#define PO_DIS_APP_TAG_VALD BIT_14 /* disable REF Tag validation */ +#define PO_DIS_REF_TAG_VALD BIT_15 + /* * ISP queue - 64-Bit addressing, continuation crc entry structure definition. */ struct crc_context { uint32_t handle; /* System handle. */ - uint32_t ref_tag; - uint16_t app_tag; + __le32 ref_tag; + __le16 app_tag; uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/ uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/ - uint16_t guard_seed; /* Initial Guard Seed */ - uint16_t prot_opts; /* Requested Data Protection Mode */ - uint16_t blk_size; /* Data size in bytes */ + __le16 guard_seed; /* Initial Guard Seed */ + __le16 prot_opts; /* Requested Data Protection Mode */ + __le16 blk_size; /* Data size in bytes */ uint16_t runt_blk_guard; /* Guard value for runt block (tape * only) */ - uint32_t byte_count; /* Total byte count/ total data + __le32 byte_count; /* Total byte count/ total data * transfer count */ union { struct { @@ -1654,10 +1671,10 @@ struct crc_context { uint32_t reserved_6; } nobundling; struct { - uint32_t dif_byte_count; /* Total DIF byte + __le32 dif_byte_count; /* Total DIF byte * count */ uint16_t reserved_1; - uint16_t dseg_count; /* Data segment count */ + __le16 dseg_count; /* Data segment count */ uint32_t reserved_2; uint32_t data_address[2]; uint32_t data_length; @@ -1748,6 +1765,8 @@ typedef struct { #define CS_PORT_CONFIG_CHG 0x2A /* Port Configuration Changed */ #define CS_PORT_BUSY 0x2B /* Port Busy */ #define CS_COMPLETE_CHKCOND 0x30 /* Error? 
*/ +#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request + failure */ #define CS_BAD_PAYLOAD 0x80 /* Driver defined */ #define CS_UNKNOWN 0x81 /* Driver defined */ #define CS_RETRY 0x82 /* Driver defined */ @@ -2676,6 +2695,7 @@ struct rsp_que { uint32_t __iomem *rsp_q_out; uint16_t ring_index; uint16_t out_ptr; + uint16_t *in_ptr; /* queue shadow in index */ uint16_t length; uint16_t options; uint16_t rid; @@ -2702,6 +2722,7 @@ struct req_que { uint32_t __iomem *req_q_out; uint16_t ring_index; uint16_t in_ptr; + uint16_t *out_ptr; /* queue shadow out index */ uint16_t cnt; uint16_t length; uint16_t options; @@ -2907,6 +2928,8 @@ struct qla_hw_data { #define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031 #define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031 #define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071 +#define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271 + uint32_t device_type; #define DT_ISP2100 BIT_0 #define DT_ISP2200 BIT_1 @@ -2928,7 +2951,8 @@ struct qla_hw_data { #define DT_ISPFX00 BIT_17 #define DT_ISP8044 BIT_18 #define DT_ISP2071 BIT_19 -#define DT_ISP_LAST (DT_ISP2071 << 1) +#define DT_ISP2271 BIT_20 +#define DT_ISP_LAST (DT_ISP2271 << 1) #define DT_T10_PI BIT_25 #define DT_IIDMA BIT_26 @@ -2959,6 +2983,7 @@ struct qla_hw_data { #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031) #define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00) #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071) +#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271) #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ IS_QLA6312(ha) || IS_QLA6322(ha)) @@ -2967,7 +2992,7 @@ struct qla_hw_data { #define IS_QLA25XX(ha) (IS_QLA2532(ha)) #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) #define IS_QLA84XX(ha) (IS_QLA8432(ha)) -#define IS_QLA27XX(ha) (IS_QLA2071(ha)) +#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha)) #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ IS_QLA84XX(ha)) #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ @@ -3006,6 +3031,7 @@ struct qla_hw_data { (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) +#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) /* HBA serial number */ uint8_t serial0; @@ -3136,7 +3162,15 @@ struct qla_hw_data { struct qla2xxx_fw_dump *fw_dump; uint32_t fw_dump_len; int fw_dumped; + unsigned long fw_dump_cap_flags; +#define RISC_PAUSE_CMPL 0 +#define DMA_SHUTDOWN_CMPL 1 +#define ISP_RESET_CMPL 2 +#define RISC_RDY_AFT_RESET 3 +#define RISC_SRAM_DUMP_CMPL 4 +#define RISC_EXT_MEM_DUMP_CMPL 5 int fw_dump_reading; + int prev_minidump_failed; dma_addr_t eft_dma; void *eft; /* Current size of mctp dump is 0x086064 bytes */ diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 32ab80957688..2ca39b8e7166 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 3a7353eaccbd..eb8f57249f1d 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -371,7 +371,10 @@ struct init_cb_24xx { * BIT 14 = Data Rate bit 1 * BIT 15 = Data Rate bit 2 * BIT 16 = Enable 75 ohm Termination Select - * BIT 17-31 = Reserved + * BIT 17-28 = Reserved + * BIT 29 = Enable response queue 0 in index shadowing + * BIT 30 = Enable request queue 0 out index shadowing + * BIT 31 = Reserved */ uint32_t firmware_options_3; uint16_t qos; @@ -1134,13 +1137,6 @@ struct device_reg_24xx { #define MIN_MULTI_ID_FABRIC 64 /* Must be power-of-2. */ #define MAX_MULTI_ID_FABRIC 256 /* ... */ -#define for_each_mapped_vp_idx(_ha, _idx) \ - for (_idx = find_next_bit((_ha)->vp_idx_map, \ - (_ha)->max_npiv_vports + 1, 1); \ - _idx <= (_ha)->max_npiv_vports; \ - _idx = find_next_bit((_ha)->vp_idx_map, \ - (_ha)->max_npiv_vports + 1, _idx + 1)) \ - struct mid_conf_entry_24xx { uint16_t reserved_1; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index e665e8109933..d48dea8fab1b 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -220,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); extern int qla2x00_issue_marker(scsi_qla_host_t *, int); +extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, + uint32_t *, uint16_t, struct qla_tgt_cmd *); +extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, + uint32_t *, uint16_t, struct qla_tgt_cmd *); +extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, + uint32_t *, uint16_t, struct qla_tgt_cmd *); + /* * Global Function Prototypes in qla_mbx.c source file. @@ -347,6 +354,11 @@ extern int qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *); extern int +qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t); +extern int +qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *); + +extern int qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t); extern int diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index e377f9d2f92a..a0df3b1b3823 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 38aeb54cd9d8..e2184412617d 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -1476,6 +1476,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) } ha->fw_dumped = 0; + ha->fw_dump_cap_flags = 0; dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; req_q_size = rsp_q_size = 0; @@ -2061,6 +2062,10 @@ qla24xx_config_rings(struct scsi_qla_host *vha) icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); + if (IS_SHADOW_REG_CAPABLE(ha)) + icb->firmware_options_2 |= + __constant_cpu_to_le32(BIT_30|BIT_29); + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); icb->rid = __constant_cpu_to_le16(rid); @@ -2138,6 +2143,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha) req = ha->req_q_map[que]; if (!req) continue; + req->out_ptr = (void *)(req->ring + req->length); + *req->out_ptr = 0; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) req->outstanding_cmds[cnt] = NULL; @@ -2153,6 +2160,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha) rsp = ha->rsp_q_map[que]; if (!rsp) continue; + rsp->in_ptr = (void *)(rsp->ring + rsp->length); + *rsp->in_ptr = 0; /* Initialize response queue entries */ if (IS_QLAFX00(ha)) qlafx00_init_response_q_entries(rsp); @@ -3406,7 +3415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - fcport->loop_id = FC_NO_LOOP_ID; + qla2x00_clear_loop_id(fcport); } } } @@ -4727,7 +4736,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *vha) { int status = 0; - uint32_t wait_time; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; @@ -4744,14 +4752,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) if (!status && !(status = qla2x00_init_rings(vha))) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); ha->flags.chip_reset_done = 1; + /* Initialize the queues in use */ qla25xx_init_queues(ha); status = qla2x00_fw_ready(vha); if (!status) { - ql_dbg(ql_dbg_taskm, vha, 0x8031, - "Start configure loop status = %d.\n", status); - /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); @@ -4766,24 +4772,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha) qlt_24xx_process_atio_queue(vha); spin_unlock_irqrestore(&ha->hardware_lock, flags); - /* Wait at most MAX_TARGET RSCNs for a stable link. */ - wait_time = 256; - do { - clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - qla2x00_configure_loop(vha); - wait_time--; - } while (!atomic_read(&vha->loop_down_timer) && - !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) - && wait_time && (test_bit(LOOP_RESYNC_NEEDED, - &vha->dpc_flags))); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } /* if no cable then assume it's good */ if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; - - ql_dbg(ql_dbg_taskm, vha, 0x8032, - "Configure loop done, status = 0x%x.\n", status); } return (status); } @@ -6130,7 +6124,6 @@ int qla82xx_restart_isp(scsi_qla_host_t *vha) { int status, rval; - uint32_t wait_time; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; @@ -6144,31 +6137,15 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) status = qla2x00_fw_ready(vha); if (!status) { - ql_log(ql_log_info, vha, 0x803c, - "Start configure loop, status =%d.\n", status); - /* Issue a marker after FW becomes ready. */ qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); - vha->flags.online = 1; - /* Wait at most MAX_TARGET RSCNs for a stable link. 
*/ - wait_time = 256; - do { - clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - qla2x00_configure_loop(vha); - wait_time--; - } while (!atomic_read(&vha->loop_down_timer) && - !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) && - wait_time && - (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } /* if no cable then assume it's good */ if ((vha->device_flags & DFLG_NO_CABLE)) status = 0; - - ql_log(ql_log_info, vha, 0x8000, - "Configure loop done, status = 0x%x.\n", status); } if (!status) { @@ -6182,8 +6159,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) vha->marker_needed = 1; } - vha->flags.online = 1; - ha->isp_ops->enable_intrs(ha); ha->isp_abort_cnt = 0; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index ce8b5fb0f347..b3b1d6fc2d6c 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -1,10 +1,11 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ +#include "qla_target.h" /** * qla24xx_calc_iocbs() - Determine number of Command Type 3 and * Continuation Type 1 IOCBs to allocate. @@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) { } static inline void -qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) +qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, + struct qla_tgt_cmd *tc) { struct dsd_dma *dsd_ptr, *tdsd_ptr; struct crc_context *ctx; - ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); + if (sp) + ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); + else if (tc) + ctx = (struct crc_context *)tc->ctx; + else { + BUG(); + return; + } /* clean up allocated prev pool */ list_for_each_entry_safe(dsd_ptr, tdsd_ptr, diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index e607568bce49..760931529592 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, return 1; } -static int +int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, uint16_t tot_dsds) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) { void *next_dsd; uint8_t avail_dsds = 0; @@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, uint32_t *cur_dsd = dsd; uint16_t used_dsds = tot_dsds; - uint32_t prot_int; + uint32_t prot_int; /* protection interval */ uint32_t partial; struct qla2_sgx sgx; dma_addr_t sle_dma; uint32_t sle_dma_len, tot_prot_dma_len = 0; - struct scsi_cmnd *cmd = GET_CMD_SP(sp); - - prot_int = cmd->device->sector_size; + struct scsi_cmnd *cmd; + struct scsi_qla_host *vha; memset(&sgx, 0, sizeof(struct qla2_sgx)); - sgx.tot_bytes = scsi_bufflen(cmd); - sgx.cur_sg = scsi_sglist(cmd); - sgx.sp = sp; - - sg_prot = scsi_prot_sglist(cmd); + if (sp) { + vha = sp->fcport->vha; + cmd = GET_CMD_SP(sp); + prot_int = cmd->device->sector_size; + + sgx.tot_bytes = scsi_bufflen(cmd); + sgx.cur_sg = scsi_sglist(cmd); + sgx.sp = sp; + + sg_prot = scsi_prot_sglist(cmd); + } else if (tc) { + vha = tc->vha; + prot_int = tc->blk_sz; + sgx.tot_bytes = tc->bufflen; + sgx.cur_sg = tc->sg; + sg_prot = tc->prot_sg; + } else { + BUG(); + return 1; + } while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { @@ -995,10 +1009,18 @@ alloc_and_fill: return 1; } - list_add_tail(&dsd_ptr->list, - &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); + if (sp) { + list_add_tail(&dsd_ptr->list, + &((struct crc_context *) + sp->u.scmd.ctx)->dsd_list); + + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &(tc->ctx->dsd_list)); + tc->ctx_dsd_alloced = 1; + } - sp->flags |= SRB_CRC_CTX_DSD_VALID; /* add new list to cmd iocb or last list */ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1033,21 +1055,35 @@ alloc_and_fill: return 0; } -static int +int qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, - uint16_t tot_dsds) + uint16_t tot_dsds, struct qla_tgt_cmd *tc) { void *next_dsd; uint8_t avail_dsds = 0; uint32_t dsd_list_len; struct dsd_dma *dsd_ptr; - struct scatterlist *sg; + struct scatterlist *sg, *sgl; uint32_t *cur_dsd = dsd; int i; uint16_t used_dsds = tot_dsds; - struct scsi_cmnd *cmd = GET_CMD_SP(sp); + struct scsi_cmnd *cmd; + struct scsi_qla_host *vha; + + if (sp) { + cmd = GET_CMD_SP(sp); + sgl = scsi_sglist(cmd); + vha = sp->fcport->vha; + } else if (tc) { + sgl = tc->sg; + vha = tc->vha; + } else { + BUG(); + return 1; + } - scsi_for_each_sg(cmd, sg, tot_dsds, i) { + + for_each_sg(sgl, sg, tot_dsds, i) { dma_addr_t sle_dma; /* Allocate additional continuation packets? 
*/ @@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, return 1; } - list_add_tail(&dsd_ptr->list, - &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); + if (sp) { + list_add_tail(&dsd_ptr->list, + &((struct crc_context *) + sp->u.scmd.ctx)->dsd_list); - sp->flags |= SRB_CRC_CTX_DSD_VALID; + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &(tc->ctx->dsd_list)); + tc->ctx_dsd_alloced = 1; + } /* add new list to cmd iocb or last list */ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, return 0; } -static int +int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, - uint32_t *dsd, - uint16_t tot_dsds) + uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) { void *next_dsd; uint8_t avail_dsds = 0; uint32_t dsd_list_len; struct dsd_dma *dsd_ptr; - struct scatterlist *sg; + struct scatterlist *sg, *sgl; int i; struct scsi_cmnd *cmd; uint32_t *cur_dsd = dsd; - uint16_t used_dsds = tot_dsds; + uint16_t used_dsds = tot_dsds; + struct scsi_qla_host *vha; + + if (sp) { + cmd = GET_CMD_SP(sp); + sgl = scsi_prot_sglist(cmd); + vha = sp->fcport->vha; + } else if (tc) { + vha = tc->vha; + sgl = tc->prot_sg; + } else { + BUG(); + return 1; + } - cmd = GET_CMD_SP(sp); - scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { + ql_dbg(ql_dbg_tgt, vha, 0xe021, + "%s: enter\n", __func__); + + for_each_sg(sgl, sg, tot_dsds, i) { dma_addr_t sle_dma; /* Allocate additional continuation packets? */ @@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, return 1; } - list_add_tail(&dsd_ptr->list, - &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); + if (sp) { + list_add_tail(&dsd_ptr->list, + &((struct crc_context *) + sp->u.scmd.ctx)->dsd_list); - sp->flags |= SRB_CRC_CTX_DSD_VALID; + sp->flags |= SRB_CRC_CTX_DSD_VALID; + } else { + list_add_tail(&dsd_ptr->list, + &(tc->ctx->dsd_list)); + tc->ctx_dsd_alloced = 1; + } /* add new list to cmd iocb or last list */ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, if (!bundling && tot_prot_dsds) { if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, - cur_dsd, tot_dsds)) + cur_dsd, tot_dsds, NULL)) goto crc_queuing_error; } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, - (tot_dsds - tot_prot_dsds))) + (tot_dsds - tot_prot_dsds), NULL)) goto crc_queuing_error; if (bundling && tot_prot_dsds) { @@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE); cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, - tot_prot_dsds)) + tot_prot_dsds, NULL)) goto crc_queuing_error; } return QLA_SUCCESS; @@ -1478,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp) tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { - cnt = RD_REG_DWORD_RELAXED(req->req_q_out); - + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -1697,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp) tot_prot_dsds = nseg; tot_dsds += nseg; if (req->cnt < (req_cnt + 2)) { - cnt = RD_REG_DWORD_RELAXED(req->req_q_out); - + cnt = IS_SHADOW_REG_CAPABLE(ha) ? 
*req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -2825,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) /* Check for room on request queue. */ if (req->cnt < req_cnt + 2) { - cnt = RD_REG_DWORD_RELAXED(req->req_q_out); - + cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : + RD_REG_DWORD_RELAXED(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 95314ef2e505..a56825c73c31 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -2009,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ql_dbg(ql_dbg_io, vha, 0x3017, "Invalid status handle (0x%x).\n", sts->handle); - if (IS_P3P_TYPE(ha)) - set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); - else - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); + if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { + if (IS_P3P_TYPE(ha)) + set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); + else + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } return; } @@ -2472,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (pkt->entry_status != 0) { qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); - (void)qlt_24xx_process_response_error(vha, pkt); + if (qlt_24xx_process_response_error(vha, pkt)) + goto process_err; ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); continue; } +process_err: switch (pkt->entry_type) { case STATUS_TYPE: @@ -2494,10 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, qla24xx_logio_entry(vha, rsp->req, (struct logio_entry_24xx *)pkt); break; - case CT_IOCB_TYPE: + case CT_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); break; - case ELS_IOCB_TYPE: + case ELS_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; case ABTS_RECV_24XX: @@ -2506,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, case ABTS_RESP_24XX: case CTIO_TYPE7: case NOTIFY_ACK_TYPE: + case CTIO_CRC2: qlt_response_pkt_all_vps(vha, (response_t *)pkt); break; case MARKER_TYPE: diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 2528709c4add..1c33a77db5c2 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -1319,7 +1319,7 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) left = 0; - list = kzalloc(dma_size, GFP_KERNEL); + list = kmemdup(pmap, dma_size, GFP_KERNEL); if (!list) { ql_log(ql_log_warn, vha, 0x1140, "%s(%ld): failed to allocate node names list " @@ -1328,7 +1328,6 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len) goto out_free; } - memcpy(list, pmap, dma_size); restart: dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma); } @@ -2644,7 +2643,10 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx, vha, 0x1090, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(abt->nport_handle)); - rval = QLA_FUNCTION_FAILED; + if (abt->nport_handle == CS_IOCB_ERROR) + rval = QLA_FUNCTION_PARAMETER_ERROR; + else + rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, "Done %s.\n", __func__); @@ -2879,6 +2881,78 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) return rval; } +int +qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA8044(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; + mcp->mb[1] = HCS_WRITE_SERDES; + mcp->mb[3] = LSW(addr); + mcp->mb[4] = MSW(addr); + mcp->mb[5] = LSW(data); + mcp->mb[6] = MSW(data); + mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1187, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, + "Done %s.\n", __func__); + } + + return rval; +} + +int +qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA8044(vha->hw)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; + mcp->mb[1] = HCS_READ_SERDES; + mcp->mb[3] = LSW(addr); + mcp->mb[4] = MSW(addr); + mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + *data = mcp->mb[2] << 16 | mcp->mb[1]; + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x118a, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, + "Done %s.\n", __func__); + } + + return rval; +} + /** * qla2x00_set_serdes_params() - * @ha: HA context @@ -3660,6 +3734,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3, "Entered %s.\n", __func__); + if (IS_SHADOW_REG_CAPABLE(ha)) + req->options |= BIT_13; + mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = req->options; mcp->mb[2] = MSW(LSD(req->dma)); @@ -3679,7 +3756,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) /* que in ptr index */ mcp->mb[8] = 0; /* que out ptr index */ - mcp->mb[9] = 0; + mcp->mb[9] = *req->out_ptr = 0; mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7| MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; @@ -3688,7 +3765,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) if 
(IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) mcp->in_mb |= MBX_1; - if (IS_QLA83XX(ha) || !IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { mcp->out_mb |= MBX_15; /* debug q create issue in SR-IOV */ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; @@ -3697,7 +3774,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(req->options & BIT_0)) { WRT_REG_DWORD(req->req_q_in, 0); - if (!IS_QLA83XX(ha) || !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) WRT_REG_DWORD(req->req_q_out, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3726,6 +3803,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6, "Entered %s.\n", __func__); + if (IS_SHADOW_REG_CAPABLE(ha)) + rsp->options |= BIT_13; + mcp->mb[0] = MBC_INITIALIZE_MULTIQ; mcp->mb[1] = rsp->options; mcp->mb[2] = MSW(LSD(rsp->dma)); @@ -3740,7 +3820,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[4] = rsp->id; /* que in ptr index */ - mcp->mb[8] = 0; + mcp->mb[8] = *rsp->in_ptr = 0; /* que out ptr index */ mcp->mb[9] = 0; mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index f0a852257f99..89998244f48d 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 0aaf6a9c87d3..abeb3901498b 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -527,21 +527,63 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; int i, core; uint32_t cnt; + uint32_t reg_val; + + spin_lock_irqsave(&ha->hardware_lock, flags); + + QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0); + QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0); + + /* stop the XOR DMA engines */ + QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02); + QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02); + + /* stop the IDMA engines */ + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val); + + reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C); + reg_val &= ~(1<<12); + QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val); + + for (i = 0; i < 100000; i++) { + if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 && + (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0) + break; + udelay(100); + } /* Set all 4 cores in reset */ for (i = 0; i < 4; i++) { QLAFX00_SET_HBA_SOC_REG(ha, (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); - } - - /* Set all 4 core Clock gating control */ - for (i = 0; i < 4; i++) { QLAFX00_SET_HBA_SOC_REG(ha, (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101)); } /* Reset all units in Fabric */ - QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101)); + QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101)); + + /* */ + QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1); + QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0); + + /* Set all 4 core Memory Power Down Registers */ + for (i = 0; i < 5; i++) { + QLAFX00_SET_HBA_SOC_REG(ha, + (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0)); + } /* Reset all interrupt control registers */ for (i = 0; i < 115; i++) { @@ -564,20 +606,19 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2)); QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); - spin_lock_irqsave(&ha->hardware_lock, flags); - /* Kick in Fabric units */ QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0)); /* Kick in Core0 to start boot process */ QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + /* Wait 10secs for soft-reset to complete. 
*/ for (cnt = 10; cnt; cnt--) { msleep(1000); barrier(); } - spin_unlock_irqrestore(&ha->hardware_lock, flags); } /** @@ -597,7 +638,6 @@ qlafx00_soft_reset(scsi_qla_host_t *vha) ha->isp_ops->disable_intrs(ha); qlafx00_soc_cpu_reset(vha); - ha->isp_ops->enable_intrs(ha); } /** @@ -2675,7 +2715,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha, uint16_t lreq_q_out = 0; lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in); - lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out); + lreq_q_out = rsp->ring_index; while (lreq_q_in != lreq_q_out) { lptr = rsp->ring_ptr; @@ -3426,7 +3466,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) sp->fcport->vha, 0x3047, (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); - memcpy((void *)pfxiocb, &fx_iocb, + memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(struct fxdisc_entry_fx00)); wmb(); } diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h index e529dfaeb854..aeaa1b40b1fc 100644 --- a/drivers/scsi/qla2xxx/qla_mr.h +++ b/drivers/scsi/qla2xxx/qla_mr.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -351,6 +351,7 @@ struct config_info_data { #define SOC_FABRIC_RST_CONTROL_REG 0x0020840 #define SOC_FABRIC_CONTROL_REG 0x0020200 #define SOC_FABRIC_CONFIG_REG 0x0020204 +#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG 0x001820C #define SOC_INTERRUPT_SOURCE_I_CONTROL_REG 0x0020B00 #define SOC_CORE_TIMER_REG 0x0021850 diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 5511e24b1f11..58f3c912d96e 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
*/ @@ -848,6 +848,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; uint32_t lock_owner = 0; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (!done) { /* acquire semaphore2 from PCI HW block */ @@ -856,17 +857,21 @@ qla82xx_rom_lock(struct qla_hw_data *ha) break; if (timeout >= qla82xx_rom_lock_timeout) { lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); + ql_log(ql_log_warn, vha, 0xb157, + "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", + __func__, ha->portnum, lock_owner); return -1; } timeout++; } - qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); + qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum); return 0; } static void qla82xx_rom_unlock(struct qla_hw_data *ha) { + qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff); qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK)); } @@ -950,6 +955,7 @@ static int qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) { int ret, loops = 0; + uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { @@ -958,8 +964,10 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) loops++; } if (loops >= 50000) { + lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_log(ql_log_fatal, vha, 0x00b9, - "Failed to acquire SEM2 lock.\n"); + "Failed to acquire SEM2 lock, Lock Owner %u.\n", + lock_owner); return -1; } ret = qla82xx_do_rom_fast_read(ha, addr, valp); @@ -1057,6 +1065,7 @@ static int ql82xx_rom_lock_d(struct qla_hw_data *ha) { int loops = 0; + uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { @@ -1065,8 +1074,9 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha) loops++; } if (loops >= 50000) { + lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); ql_log(ql_log_warn, vha, 0xb010, - "ROM lock failed.\n"); + "ROM lock failed, Lock Owner %u.\n", lock_owner); return -1; } return 0; @@ -2811,12 +2821,14 @@ static void qla82xx_rom_lock_recovery(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + uint32_t lock_owner = 0; - if (qla82xx_rom_lock(ha)) + if (qla82xx_rom_lock(ha)) { + lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); /* Someone else is holding the lock. */ ql_log(ql_log_info, vha, 0xb022, - "Resetting rom_lock.\n"); - + "Resetting rom_lock, Lock Owner %u.\n", lock_owner); + } /* * Either we got the lock, or someone * else died while holding it. @@ -2840,47 +2852,30 @@ static int qla82xx_device_bootstrap(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; - int i, timeout; + int i; uint32_t old_count, count; struct qla_hw_data *ha = vha->hw; - int need_reset = 0, peg_stuck = 1; + int need_reset = 0; need_reset = qla82xx_need_reset(ha); - old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); - - for (i = 0; i < 10; i++) { - timeout = msleep_interruptible(200); - if (timeout) { - qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, - QLA8XXX_DEV_FAILED); - return QLA_FUNCTION_FAILED; - } - - count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); - if (count != old_count) - peg_stuck = 0; - } - if (need_reset) { /* We are trying to perform a recovery here. */ - if (peg_stuck) + if (ha->flags.isp82xx_fw_hung) qla82xx_rom_lock_recovery(ha); - goto dev_initialize; } else { - /* Start of day for this ha context. */ - if (peg_stuck) { - /* Either we are the first or recovery in progress. 
*/ - qla82xx_rom_lock_recovery(ha); - goto dev_initialize; - } else - /* Firmware already running. */ - goto dev_ready; + old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + for (i = 0; i < 10; i++) { + msleep(200); + count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + if (count != old_count) { + rval = QLA_SUCCESS; + goto dev_ready; + } + } + qla82xx_rom_lock_recovery(ha); } - return rval; - -dev_initialize: /* set to DEV_INITIALIZING */ ql_log(ql_log_info, vha, 0x009e, "HW State: INITIALIZING.\n"); @@ -3142,18 +3137,18 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha) if (ql2xmdenable) { if (!ha->fw_dumped) { - if (fw_major_version != ha->fw_major_version || + if ((fw_major_version != ha->fw_major_version || fw_minor_version != ha->fw_minor_version || - fw_subminor_version != ha->fw_subminor_version) { + fw_subminor_version != ha->fw_subminor_version) || + (ha->prev_minidump_failed)) { ql_dbg(ql_dbg_p3p, vha, 0xb02d, - "Firmware version differs " - "Previous version: %d:%d:%d - " - "New version: %d:%d:%d\n", + "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n", fw_major_version, fw_minor_version, fw_subminor_version, ha->fw_major_version, ha->fw_minor_version, - ha->fw_subminor_version); + ha->fw_subminor_version, + ha->prev_minidump_failed); /* Release MiniDump resources */ qla82xx_md_free(vha); /* Allocate MiniDump resources */ @@ -3682,8 +3677,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - if (!sp->u.scmd.ctx || - (sp->flags & SRB_FCP_CMND_DMA_VALID)) { + if ((!sp->u.scmd.ctx || + (sp->flags & + SRB_FCP_CMND_DMA_VALID)) && + !ha->flags.isp82xx_fw_hung) { spin_unlock_irqrestore( &ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 1bb93dbbccbb..59c477883a73 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -333,9 +333,6 @@ #define QLA82XX_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004) #define QLA82XX_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038) -/* Lock IDs for ROM lock */ -#define ROM_LOCK_DRIVER 0x0d417340 - #define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000 /* all are 1MB windows */ #define QLA82XX_PCI_CRB_WINDOW(A) \ (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE) @@ -1186,6 +1183,7 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, #define CRB_NIU_XG_PAUSE_CTL_P1 0x8 #define qla82xx_get_temp_val(x) ((x) >> 16) +#define qla82xx_get_temp_val1(x) ((x) & 0x0000FFFF) #define qla82xx_get_temp_state(x) ((x) & 0xffff) #define qla82xx_encode_temp(val, state) (((val) << 16) | (state)) diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 86cf10815db0..da9e3902f219 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -1,17 +1,20 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details.
*/ #include <linux/vmalloc.h> +#include <linux/delay.h> #include "qla_def.h" #include "qla_gbl.h" #include <linux/delay.h> +#define TIMEOUT_100_MS 100 + /* 8044 Flash Read/Write functions */ uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr) @@ -117,6 +120,95 @@ qla8044_read_write_crb_reg(struct scsi_qla_host *vha, qla8044_wr_reg_indirect(vha, waddr, value); } +static int +qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1, + uint32_t mask) +{ + unsigned long timeout; + uint32_t temp; + + /* jiffies after 100ms */ + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + if (time_after_eq(jiffies, timeout)) { + ql_log(ql_log_warn, vha, 0xb151, + "Error in processing rdmdio entry\n"); + return -1; + } + } while (1); + + return 0; +} + +static uint32_t +qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha, + uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr) +{ + uint32_t temp; + int ret = 0; + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + temp = (0x40000000 | addr); + qla8044_wr_reg_indirect(vha, addr1, temp); + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return 0; + + qla8044_rd_reg_indirect(vha, addr3, &ret); + + return ret; +} + + +static int +qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha, + uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask) +{ + unsigned long timeout; + uint32_t temp; + + /* jiffies after 100 msecs */ + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2); + if ((temp & 0x1) != 1) + break; + if (time_after_eq(jiffies, timeout)) { + ql_log(ql_log_warn, vha, 0xb152, + "Error in processing mdiobus idle\n"); + return -1; + } + } while (1); + + return 0; +} + +static int +qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1, + uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value) +{ + int ret = 0; + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + qla8044_wr_reg_indirect(vha, addr3, value); + qla8044_wr_reg_indirect(vha, addr1, addr); + + ret = qla8044_poll_wait_for_ready(vha, addr1, mask); + if (ret == -1) + return -1; + + return 0; +} /* * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask, * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. 
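For reference -- this is not part of the patch -- the four helpers added above (qla8044_poll_wait_for_ready() and friends) all build on one idiom: read an indirect register until a mask bit comes up, bounded by a jiffies deadline of TIMEOUT_100_MS. A minimal sketch of that idiom, where read_reg() is a hypothetical stand-in for qla8044_rd_reg_indirect() (the real helpers take the scsi_qla_host and an indirect register address instead):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int poll_reg_for_mask(u32 (*read_reg)(u32 addr), u32 addr, u32 mask)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(100);

	do {
		if (read_reg(addr) & mask)
			return 0;		/* ready: a bit under the mask came up */
		if (time_after_eq(jiffies, deadline))
			return -ETIMEDOUT;	/* the driver helpers return -1 here */
	} while (1);
}

The same compare-then-check-deadline ordering matters in the helpers: the register is sampled once more before the timeout is declared, so a slow but successful transition just before the deadline is still reported as success.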
@@ -356,8 +448,8 @@ qla8044_flash_lock(scsi_qla_host_t *vha) lock_owner = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK_ID); ql_log(ql_log_warn, vha, 0xb113, - "%s: flash lock by %d failed, held by %d\n", - __func__, ha->portnum, lock_owner); + "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", + __func__, ha->portnum, lock_owner); ret_val = QLA_FUNCTION_FAILED; break; } @@ -1541,7 +1633,7 @@ static void qla8044_need_reset_handler(struct scsi_qla_host *vha) { uint32_t dev_state = 0, drv_state, drv_active; - unsigned long reset_timeout, dev_init_timeout; + unsigned long reset_timeout; struct qla_hw_data *ha = vha->hw; ql_log(ql_log_fatal, vha, 0xb0c2, @@ -1555,84 +1647,78 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha) qla8044_idc_lock(ha); } + dev_state = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX); drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX); drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); ql_log(ql_log_info, vha, 0xb0c5, - "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", - __func__, vha->host_no, drv_state, drv_active); + "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n", + __func__, vha->host_no, drv_state, drv_active, dev_state); - if (!ha->flags.nic_core_reset_owner) { - ql_dbg(ql_dbg_p3p, vha, 0xb0c3, - "%s(%ld): reset acknowledged\n", - __func__, vha->host_no); - qla8044_set_rst_ready(vha); + qla8044_set_rst_ready(vha); - /* Non-reset owners ACK Reset and wait for device INIT state - * as part of Reset Recovery by Reset Owner - */ - dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); + /* wait for 10 seconds for reset ack from all functions */ + reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); - do { - if (time_after_eq(jiffies, dev_init_timeout)) { - ql_log(ql_log_info, vha, 0xb0c4, - "%s: Non Reset owner: Reset Ack Timeout!\n", - __func__); - break; - } + do { + if (time_after_eq(jiffies, reset_timeout)) { + ql_log(ql_log_info, vha, 0xb0c4, + "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n", + __func__, ha->portnum, drv_state, drv_active); + break; + } - qla8044_idc_unlock(ha); - msleep(1000); - qla8044_idc_lock(ha); + qla8044_idc_unlock(ha); + msleep(1000); + qla8044_idc_lock(ha); - dev_state = qla8044_rd_direct(vha, - QLA8044_CRB_DEV_STATE_INDEX); - } while (((drv_state & drv_active) != drv_active) && - (dev_state == QLA8XXX_DEV_NEED_RESET)); + dev_state = qla8044_rd_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX); + drv_state = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_STATE_INDEX); + drv_active = qla8044_rd_direct(vha, + QLA8044_CRB_DRV_ACTIVE_INDEX); + } while (((drv_state & drv_active) != drv_active) && + (dev_state == QLA8XXX_DEV_NEED_RESET)); + + /* Remove IDC participation of functions not acknowledging */ + if (drv_state != drv_active) { + ql_log(ql_log_info, vha, 0xb0c7, + "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n", + __func__, vha->host_no, ha->portnum, + (drv_active ^ drv_state)); + drv_active = drv_active & drv_state; + qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, + drv_active); } else { - qla8044_set_rst_ready(vha); - - /* wait for 10 seconds for reset ack from all functions */ - reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); - - while ((drv_state & drv_active) != drv_active) { - if (time_after_eq(jiffies, reset_timeout)) { - ql_log(ql_log_info, vha, 0xb0c6, - "%s: RESET TIMEOUT!" 
- "drv_state: 0x%08x, drv_active: 0x%08x\n", - QLA2XXX_DRIVER_NAME, drv_state, drv_active); - break; - } - - qla8044_idc_unlock(ha); - msleep(1000); - qla8044_idc_lock(ha); - - drv_state = qla8044_rd_direct(vha, - QLA8044_CRB_DRV_STATE_INDEX); - drv_active = qla8044_rd_direct(vha, - QLA8044_CRB_DRV_ACTIVE_INDEX); - } - - if (drv_state != drv_active) { - ql_log(ql_log_info, vha, 0xb0c7, - "%s(%ld): Reset_owner turning off drv_active " - "of non-acking function 0x%x\n", __func__, - vha->host_no, (drv_active ^ drv_state)); - drv_active = drv_active & drv_state; - qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, - drv_active); + /* + * Reset owner should execute reset recovery, + * if all functions acknowledged + */ + if ((ha->flags.nic_core_reset_owner) && + (dev_state == QLA8XXX_DEV_NEED_RESET)) { + ha->flags.nic_core_reset_owner = 0; + qla8044_device_bootstrap(vha); + return; } + } - /* - * Clear RESET OWNER, will be set at next reset - * by next RST_OWNER - */ + /* Exit if non active function */ + if (!(drv_active & (1 << ha->portnum))) { ha->flags.nic_core_reset_owner = 0; + return; + } - /* Start Reset Recovery */ + /* + * Execute Reset Recovery if Reset Owner or Function 7 + * is the only active function + */ + if (ha->flags.nic_core_reset_owner || + ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) { + ha->flags.nic_core_reset_owner = 0; qla8044_device_bootstrap(vha); } } @@ -1655,6 +1741,19 @@ qla8044_set_drv_active(struct scsi_qla_host *vha) qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active); } +static int +qla8044_check_drv_active(struct scsi_qla_host *vha) +{ + uint32_t drv_active; + struct qla_hw_data *ha = vha->hw; + + drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); + if (drv_active & (1 << ha->portnum)) + return QLA_SUCCESS; + else + return QLA_TEST_FAILED; +} + static void qla8044_clear_idc_dontreset(struct scsi_qla_host *vha) { @@ -1837,14 +1936,16 @@ qla8044_device_state_handler(struct scsi_qla_host *vha) while (1) { if (time_after_eq(jiffies, dev_init_timeout)) { - ql_log(ql_log_warn, vha, 0xb0cf, - "%s: Device Init Failed 0x%x = %s\n", - QLA2XXX_DRIVER_NAME, dev_state, - dev_state < MAX_STATES ? - qdev_state(dev_state) : "Unknown"); - - qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, - QLA8XXX_DEV_FAILED); + if (qla8044_check_drv_active(vha) == QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xb0cf, + "%s: Device Init Failed 0x%x = %s\n", + QLA2XXX_DRIVER_NAME, dev_state, + dev_state < MAX_STATES ? 
+ qdev_state(dev_state) : "Unknown"); + qla8044_wr_direct(vha, + QLA8044_CRB_DEV_STATE_INDEX, + QLA8XXX_DEV_FAILED); + } } dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); @@ -2017,6 +2118,13 @@ qla8044_watchdog(struct scsi_qla_host *vha) test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) { dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); + if (qla8044_check_fw_alive(vha)) { + ha->flags.isp82xx_fw_hung = 1; + ql_log(ql_log_warn, vha, 0xb10a, + "Firmware hung.\n"); + qla82xx_clear_pending_mbx(vha); + } + if (qla8044_check_temp(vha)) { set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); ha->flags.isp82xx_fw_hung = 1; @@ -2037,7 +2145,7 @@ qla8044_watchdog(struct scsi_qla_host *vha) qla2xxx_wake_dpc(vha); } else { /* Check firmware health */ - if (qla8044_check_fw_alive(vha)) { + if (ha->flags.isp82xx_fw_hung) { halt_status = qla8044_rd_direct(vha, QLA8044_PEG_HALT_STATUS1_INDEX); if (halt_status & @@ -2073,12 +2181,8 @@ qla8044_watchdog(struct scsi_qla_host *vha) __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - qla82xx_clear_pending_mbx(vha); } } - ha->flags.isp82xx_fw_hung = 1; - ql_log(ql_log_warn, vha, 0xb10a, - "Firmware hung.\n"); qla2xxx_wake_dpc(vha); } } @@ -2286,8 +2390,6 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha, } if (j >= MAX_CTL_CHECK) { - printk_ratelimited(KERN_ERR - "%s: failed to read through agent\n", __func__); write_unlock_irqrestore(&ha->hw_lock, flags); return QLA_SUCCESS; } @@ -2882,6 +2984,231 @@ error_exit: return rval; } +static uint32_t +qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + int loop_cnt; + uint32_t addr1, addr2, value, data, temp, wrVal; + uint8_t stride, stride2; + uint16_t count; + uint32_t poll, mask, data_size, modify_mask; + uint32_t wait_count = 0; + + uint32_t *data_ptr = *d_ptr; + + struct qla8044_minidump_entry_rddfe *rddfe; + rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr; + + addr1 = rddfe->addr_1; + value = rddfe->value; + stride = rddfe->stride; + stride2 = rddfe->stride2; + count = rddfe->count; + + poll = rddfe->poll; + mask = rddfe->mask; + modify_mask = rddfe->modify_mask; + data_size = rddfe->data_size; + + addr2 = addr1 + stride; + + for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { + qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value)); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb153, + "%s: TIMEOUT\n", __func__); + goto error; + } else { + qla8044_rd_reg_indirect(vha, addr2, &temp); + temp = temp & modify_mask; + temp = (temp | ((loop_cnt << 16) | loop_cnt)); + wrVal = ((temp << 16) | temp); + + qla8044_wr_reg_indirect(vha, addr2, wrVal); + qla8044_wr_reg_indirect(vha, addr1, value); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb154, + "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_wr_reg_indirect(vha, addr1, + ((0x40000000 | value) + stride2)); + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb155, + "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_rd_reg_indirect(vha, addr2, &data); + 
+ *data_ptr++ = wrVal; + *data_ptr++ = data; + } + + } + + *d_ptr = data_ptr; + return QLA_SUCCESS; + +error: + return -1; + +} + +static uint32_t +qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + int ret = 0; + uint32_t addr1, addr2, value1, value2, data, selVal; + uint8_t stride1, stride2; + uint32_t addr3, addr4, addr5, addr6, addr7; + uint16_t count, loop_cnt; + uint32_t poll, mask; + uint32_t *data_ptr = *d_ptr; + + struct qla8044_minidump_entry_rdmdio *rdmdio; + + rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr; + + addr1 = rdmdio->addr_1; + addr2 = rdmdio->addr_2; + value1 = rdmdio->value_1; + stride1 = rdmdio->stride_1; + stride2 = rdmdio->stride_2; + count = rdmdio->count; + + poll = rdmdio->poll; + mask = rdmdio->mask; + value2 = rdmdio->value_2; + + addr3 = addr1 + stride1; + + for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { + ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, + addr3, mask); + if (ret == -1) + goto error; + + addr4 = addr2 - stride1; + ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4, + value2); + if (ret == -1) + goto error; + + addr5 = addr2 - (2 * stride1); + ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5, + value1); + if (ret == -1) + goto error; + + addr6 = addr2 - (3 * stride1); + ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, + addr6, 0x2); + if (ret == -1) + goto error; + + ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, + addr3, mask); + if (ret == -1) + goto error; + + addr7 = addr2 - (4 * stride1); + data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, + mask, addr7); + if (data == -1) + goto error; + + selVal = (value2 << 18) | (value1 << 2) | 2; + + stride2 = rdmdio->stride_2; + *data_ptr++ = selVal; + *data_ptr++ = data; + + value1 = value1 + stride2; + *d_ptr = data_ptr; + } + + return 0; + +error: + return -1; +} + +static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, + struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ + uint32_t addr1, addr2, value1, value2, poll, mask, r_value; + uint32_t wait_count = 0; + struct qla8044_minidump_entry_pollwr *pollwr_hdr; + + pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; + addr1 = pollwr_hdr->addr_1; + addr2 = pollwr_hdr->addr_2; + value1 = pollwr_hdr->value_1; + value2 = pollwr_hdr->value_2; + + poll = pollwr_hdr->poll; + mask = pollwr_hdr->mask; + + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__); + goto error; + } + + qla8044_wr_reg_indirect(vha, addr2, value2); + qla8044_wr_reg_indirect(vha, addr1, value1); + + wait_count = 0; + while (wait_count < poll) { + qla8044_rd_reg_indirect(vha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + + return QLA_SUCCESS; + +error: + return -1; +} + /* * * qla8044_collect_md_data - Retrieve firmware minidump data. 
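One convention shared by the rddfe and rdmdio handlers above is worth spelling out: each successful probe appends a (selector, data) pair of 32-bit words to the minidump buffer and hands the advanced cursor back through the uint32_t **d_ptr argument, so the template walker in qla8044_collect_md_data() can keep accounting for dump size. In isolation (capture_pair() is illustrative only, not a function in the driver):

#include <linux/types.h>

static void capture_pair(uint32_t **d_ptr, uint32_t sel, uint32_t data)
{
	uint32_t *p = *d_ptr;

	*p++ = sel;	/* the selector/value that was programmed */
	*p++ = data;	/* the register contents read back */
	*d_ptr = p;	/* caller's cursor advances by two words */
}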
@@ -3089,6 +3416,24 @@ qla8044_collect_md_data(struct scsi_qla_host *vha) if (rval != QLA_SUCCESS) qla8044_mark_entry_skipped(vha, entry_hdr, i); break; + case QLA8044_RDDFE: + rval = qla8044_minidump_process_rddfe(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_RDMDIO: + rval = qla8044_minidump_process_rdmdio(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA8044_POLLWR: + rval = qla8044_minidump_process_pollwr(vha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla8044_mark_entry_skipped(vha, entry_hdr, i); + break; case QLA82XX_RDNOP: default: qla8044_mark_entry_skipped(vha, entry_hdr, i); @@ -3110,6 +3455,7 @@ skip_nxt_entry: "Dump data mismatch: Data collected: " "[0x%x], total_data_size:[0x%x]\n", data_collected, ha->md_dump_size); + rval = QLA_FUNCTION_FAILED; goto md_failed; } @@ -3134,10 +3480,12 @@ qla8044_get_minidump(struct scsi_qla_host *vha) if (!qla8044_collect_md_data(vha)) { ha->fw_dumped = 1; + ha->prev_minidump_failed = 0; } else { ql_log(ql_log_fatal, vha, 0xb0db, "%s: Unable to collect minidump\n", __func__); + ha->prev_minidump_failed = 1; } } diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h index 2ab2eabab908..ada36057d7cd 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.h +++ b/drivers/scsi/qla2xxx/qla_nx2.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -133,6 +133,7 @@ #define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4)) #define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4)) #define QLA8044_LINK_SPEED_FACTOR 10 +#define QLA8044_FUN7_ACTIVE_INDEX 0x80 /* FLASH API Defines */ #define QLA8044_FLASH_MAX_WAIT_USEC 100 @@ -431,6 +432,50 @@ struct qla8044_minidump_entry_pollrd { uint32_t rsvd_1; } __packed; +struct qla8044_minidump_entry_rddfe { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t value; + uint8_t stride; + uint8_t stride2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t modify_mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +struct qla8044_minidump_entry_rdmdio { + struct qla8044_minidump_entry_hdr h; + + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint8_t stride_1; + uint8_t stride_2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t value_2; + uint32_t data_size; + +} __packed; + +struct qla8044_minidump_entry_pollwr { + struct qla8044_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll; + uint32_t mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + /* RDMUX2 Entry */ struct qla8044_minidump_entry_rdmux2 { struct qla8044_minidump_entry_hdr h; @@ -516,6 +561,9 @@ static const uint32_t qla8044_reg_tbl[] = { #define QLA8044_DBG_RSVD_ARRAY_LEN 8 #define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16 #define QLA8044_SS_PCI_INDEX 0 +#define QLA8044_RDDFE 38 +#define QLA8044_RDMDIO 39 +#define QLA8044_POLLWR 40 struct qla8044_minidump_template_hdr { uint32_t entry_type; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 19e99cc33724..d96bfb55e57b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 
2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -616,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) if (sp->flags & SRB_CRC_CTX_DSD_VALID) { /* List assured to be having elements */ - qla2x00_clean_dsd_pool(ha, sp); + qla2x00_clean_dsd_pool(ha, sp, NULL); sp->flags &= ~SRB_CRC_CTX_DSD_VALID; } @@ -781,7 +781,7 @@ static int qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) { #define ABORT_POLLING_PERIOD 1000 -#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD)) +#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) unsigned long wait_iter = ABORT_WAIT_ITER; scsi_qla_host_t *vha = shost_priv(cmd->device->host); struct qla_hw_data *ha = vha->hw; @@ -844,11 +844,8 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) } /* - * qla2x00_wait_for_reset_ready - * Wait till the HBA is online after going through - * <= MAX_RETRIES_OF_ISP_ABORT or - * finally HBA is disabled ie marked offline or flash - * operations are in progress. + * qla2x00_wait_for_hba_ready + * Wait till the HBA is ready before doing driver unload * * Input: * ha - pointer to host adapter structure * @@ -857,35 +854,15 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha) * Note: * Does context switching-Release SPIN_LOCK * (if any) before calling this routine. * - * Return: - * Success (Adapter is online/no flash ops) : 0 - * Failed (Adapter is offline/disabled/flash ops in progress) : 1 */ -static int -qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha) +static void +qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha) { - int return_status; - unsigned long wait_online; struct qla_hw_data *ha = vha->hw; - scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); - while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || - test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || - test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || - ha->optrom_state != QLA_SWAITING || - ha->dpc_active) && time_before(jiffies, wait_online)) + while ((!(vha->flags.online) || ha->dpc_active || + ha->flags.mbox_busy)) msleep(1000); - - if (base_vha->flags.online && ha->optrom_state == QLA_SWAITING) - return_status = QLA_SUCCESS; - else - return_status = QLA_FUNCTION_FAILED; - - ql_dbg(ql_dbg_taskm, vha, 0x8019, - "%s return status=%d.\n", __func__, return_status); - - return return_status; } int @@ -945,7 +922,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) int ret; unsigned int id, lun; unsigned long flags; - int wait = 0; + int rval, wait = 0; struct qla_hw_data *ha = vha->hw; if (!CMD_SP(cmd)) @@ -974,10 +951,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) sp_get(sp); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(sp)) { - ret = FAILED; + rval = ha->isp_ops->abort_command(sp); + if (rval) { + if (rval == QLA_FUNCTION_PARAMETER_ERROR) { + /* + * Decrement the ref_count since we can't find the + * command + */ + atomic_dec(&sp->ref_count); + ret = SUCCESS; + } else + ret = FAILED; + ql_dbg(ql_dbg_taskm, vha, 0x8003, - "Abort command mbx failed cmd=%p.\n", cmd); + "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval); } else { ql_dbg(ql_dbg_taskm, vha, 0x8004, "Abort command mbx success cmd=%p.\n", cmd); @@ -985,6 +972,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } spin_lock_irqsave(&ha->hardware_lock, flags); + /* + * Clear the slot in the outstanding_cmds array if we can't find the + * command to reclaim the resources.
+ */ + if (rval == QLA_FUNCTION_PARAMETER_ERROR) + vha->req->outstanding_cmds[sp->handle] = NULL; sp->done(ha, sp, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1236,7 +1229,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) ql_log(ql_log_info, vha, 0x8018, "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); - if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) + /* + * No point in issuing another reset if one is active. Also do not + * attempt a reset if we are updating flash. + */ + if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING) goto eh_host_reset_lock; if (vha != base_vha) { @@ -2270,6 +2267,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type |= DT_IIDMA; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; + case PCI_DEVICE_ID_QLOGIC_ISP2271: + ha->device_type |= DT_ISP2271; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; } if (IS_QLA82XX(ha)) @@ -2346,7 +2350,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || - pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) { + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) { bars = pci_select_bars(pdev, IORESOURCE_MEM); mem_only = 1; ql_dbg_pci(ql_dbg_init, pdev, 0x0007, @@ -2877,6 +2882,7 @@ skip_dpc: base_vha->flags.init_done = 1; base_vha->flags.online = 1; + ha->prev_minidump_failed = 0; ql_dbg(ql_dbg_init, base_vha, 0x00f2, "Init done and hba is online.\n"); @@ -3136,6 +3142,8 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha = pci_get_drvdata(pdev); ha = base_vha->hw; + qla2x00_wait_for_hba_ready(base_vha); + set_bit(UNLOADING, &base_vha->dpc_flags); if (IS_QLAFX00(ha)) @@ -3645,6 +3653,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha) ha->eft = NULL; ha->eft_dma = 0; ha->fw_dumped = 0; + ha->fw_dump_cap_flags = 0; ha->fw_dump_reading = 0; ha->fw_dump = NULL; ha->fw_dump_len = 0; @@ -4828,7 +4837,7 @@ qla2x00_do_dpc(void *data) ha = (struct qla_hw_data *)data; base_vha = pci_get_drvdata(ha->pdev); - set_user_nice(current, -20); + set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { @@ -4913,12 +4922,13 @@ qla2x00_do_dpc(void *data) if (qlafx00_reset_initialize(base_vha)) { /* Failed. Abort isp later. 
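Wiring in the new ISP2271 follows the two-step pattern visible above: teach qla2x00_set_isp_flags() the device-type bits, then append the ID to the pci_device_id table so the PCI core can bind the driver. A hedged sketch of the table side; the IDs shown are placeholders, not validated against the real part:

    #include <linux/module.h>
    #include <linux/pci.h>

    /* Placeholder IDs for illustration; the real values live in the
     * qla2xxx headers. */
    #define EX_PCI_VENDOR_ID_QLOGIC  0x1077
    #define EX_PCI_DEVICE_ID_ISP2271 0x2271

    static const struct pci_device_id ex_pci_tbl[] = {
            { PCI_DEVICE(EX_PCI_VENDOR_ID_QLOGIC, EX_PCI_DEVICE_ID_ISP2271) },
            { 0 },                          /* terminator */
    };
    MODULE_DEVICE_TABLE(pci, ex_pci_tbl);

Missing any one of the probe-side lists (the flags switch, the mem_only BAR selection, the firmware table) would leave the new chip bound but misconfigured, which is why the hunks above touch all of them.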
*/ if (!test_bit(UNLOADING, - &base_vha->dpc_flags)) + &base_vha->dpc_flags)) { set_bit(ISP_UNRECOVERABLE, &base_vha->dpc_flags); ql_dbg(ql_dbg_dpc, base_vha, 0x4021, "Reset Recovery Failed\n"); + } } } @@ -5077,8 +5087,10 @@ intr_on_check: ha->isp_ops->enable_intrs(ha); if (test_and_clear_bit(BEACON_BLINK_NEEDED, - &base_vha->dpc_flags)) - ha->isp_ops->beacon_blink(base_vha); + &base_vha->dpc_flags)) { + if (ha->beacon_blink_led == 1) + ha->isp_ops->beacon_blink(base_vha); + } if (!IS_QLAFX00(ha)) qla2x00_do_dpc_all_vps(base_vha); @@ -5325,7 +5337,7 @@ qla2x00_timer(scsi_qla_host_t *vha) #define FW_ISP82XX 7 #define FW_ISP2031 8 #define FW_ISP8031 9 -#define FW_ISP2071 10 +#define FW_ISP27XX 10 #define FW_FILE_ISP21XX "ql2100_fw.bin" #define FW_FILE_ISP22XX "ql2200_fw.bin" @@ -5337,7 +5349,7 @@ qla2x00_timer(scsi_qla_host_t *vha) #define FW_FILE_ISP82XX "ql8200_fw.bin" #define FW_FILE_ISP2031 "ql2600_fw.bin" #define FW_FILE_ISP8031 "ql8300_fw.bin" -#define FW_FILE_ISP2071 "ql2700_fw.bin" +#define FW_FILE_ISP27XX "ql2700_fw.bin" static DEFINE_MUTEX(qla_fw_lock); @@ -5353,7 +5365,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = { { .name = FW_FILE_ISP82XX, }, { .name = FW_FILE_ISP2031, }, { .name = FW_FILE_ISP8031, }, - { .name = FW_FILE_ISP2071, }, + { .name = FW_FILE_ISP27XX, }, }; struct fw_blob * @@ -5382,8 +5394,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha) blob = &qla_fw_blobs[FW_ISP2031]; } else if (IS_QLA8031(ha)) { blob = &qla_fw_blobs[FW_ISP8031]; - } else if (IS_QLA2071(ha)) { - blob = &qla_fw_blobs[FW_ISP2071]; + } else if (IS_QLA27XX(ha)) { + blob = &qla_fw_blobs[FW_ISP27XX]; } else { return NULL; } @@ -5714,6 +5726,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h index 46ef0ac48f44..2fb7ebfbbc38 100644 --- a/drivers/scsi/qla2xxx/qla_settings.h +++ b/drivers/scsi/qla2xxx/qla_settings.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index f28123e8ed65..bca173e56f16 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
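Renaming FW_ISP2071 to FW_ISP27XX lets a single firmware slot serve the whole 27xx family via the IS_QLA27XX() test. The load itself goes through the stock firmware-loader API; a sketch under the assumption that one image per family is sufficient (the ex_ name is invented):

    #include <linux/device.h>
    #include <linux/firmware.h>

    static int ex_load_family_fw(struct device *dev,
                                 const struct firmware **fw)
    {
            /* One blob shared by every family member; request_firmware()
             * pins it until release_firmware() is called. */
            return request_firmware(fw, "ql2700_fw.bin", dev);
    }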
*/ @@ -1727,11 +1727,8 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha) if (IS_QLA2031(ha)) { led_select_value = qla83xx_select_led_port(ha); - qla83xx_wr_reg(vha, led_select_value, 0x40002000); - qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000); - msleep(1000); - qla83xx_wr_reg(vha, led_select_value, 0x40004000); - qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000); + qla83xx_wr_reg(vha, led_select_value, 0x40000230); + qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230); } else if (IS_QLA8031(ha)) { led_select_value = qla83xx_select_led_port(ha); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0cb73074c199..e632e14180cf 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, /* * Global Variables */ -static struct kmem_cache *qla_tgt_cmd_cachep; static struct kmem_cache *qla_tgt_mgmt_cmd_cachep; static mempool_t *qla_tgt_mgmt_cmd_mempool; static struct workqueue_struct *qla_tgt_wq; @@ -182,6 +181,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha, void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, struct atio_from_isp *atio) { + ql_dbg(ql_dbg_tgt, vha, 0xe072, + "%s: qla_target(%d): type %x ox_id %04x\n", + __func__, vha->vp_idx, atio->u.raw.entry_type, + be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + switch (atio->u.raw.entry_type) { case ATIO_TYPE7: { @@ -236,6 +240,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt) { switch (pkt->entry_type) { + case CTIO_CRC2: + ql_dbg(ql_dbg_tgt, vha, 0xe073, + "qla_target(%d):%s: CRC2 Response pkt\n", + vha->vp_idx, __func__); case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; @@ -1120,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, ctio->u.status1.flags = __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); - ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; + ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id); qla2x00_start_iocbs(vha, vha->req); @@ -1254,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, { struct atio_from_isp *atio = &mcmd->orig_iocb.atio; struct ctio7_to_24xx *ctio; + uint16_t temp; ql_dbg(ql_dbg_tgt, ha, 0xe008, "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", @@ -1284,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha, ctio->u.status1.flags = (atio->u.isp24.attr << 9) | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); - ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio->u.status1.ox_id = cpu_to_le16(temp); ctio->u.status1.scsi_status = __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); ctio->u.status1.response_len = __constant_cpu_to_le16(8); @@ -1350,13 +1360,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm) prm->cmd->sg_mapped = 1; - /* - * If greater than four sg entries then we need to allocate - * the continuation entries - */ - if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) - prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - - prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont); + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { + /* + * If greater than four sg entries then we need to allocate + * the continuation entries + */ + if (prm->seg_cnt > 
prm->tgt->datasegs_per_cmd) + prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - + prm->tgt->datasegs_per_cmd, + prm->tgt->datasegs_per_cont); + } else { + /* DIF */ + if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || + (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { + prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz); + prm->tot_dsds = prm->seg_cnt; + } else + prm->tot_dsds = prm->seg_cnt; + + if (cmd->prot_sg_cnt) { + prm->prot_sg = cmd->prot_sg; + prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev, + cmd->prot_sg, cmd->prot_sg_cnt, + cmd->dma_data_direction); + if (unlikely(prm->prot_seg_cnt == 0)) + goto out_err; + + if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || + (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { + /* Dif Bundling not support here */ + prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen, + cmd->blk_sz); + prm->tot_dsds += prm->prot_seg_cnt; + } else + prm->tot_dsds += prm->prot_seg_cnt; + } + } ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n", prm->seg_cnt, prm->req_cnt); @@ -1377,6 +1416,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha, BUG_ON(!cmd->sg_mapped); pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); cmd->sg_mapped = 0; + + if (cmd->prot_sg_cnt) + pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt, + cmd->dma_data_direction); + + if (cmd->ctx_dsd_alloced) + qla2x00_clean_dsd_pool(ha, NULL, cmd); + + if (cmd->ctx) + dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma); } static int qlt_check_reserve_free_req(struct scsi_qla_host *vha, @@ -1466,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, struct ctio7_to_24xx *pkt; struct qla_hw_data *ha = vha->hw; struct atio_from_isp *atio = &prm->cmd->atio; + uint16_t temp; pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; prm->pkt = pkt; @@ -1494,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; pkt->exchange_addr = atio->u.isp24.exchange_addr; pkt->u.status0.flags |= (atio->u.isp24.attr << 9); - pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + pkt->u.status0.ox_id = cpu_to_le16(temp); pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); ql_dbg(ql_dbg_tgt, vha, 0xe00c, "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", - vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, - le16_to_cpu(pkt->u.status0.ox_id)); + vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp); return 0; } @@ -1665,8 +1715,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; } - ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n", - vha->vp_idx, cmd->tag); + ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n", + vha->vp_idx, cmd->tag, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); prm->cmd = cmd; prm->tgt = tgt; @@ -1902,6 +1953,328 @@ skip_explict_conf: /* Sense with len > 24, is it possible ??? */ } + + +/* diff */ +static inline int +qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) +{ + /* + * Uncomment when corresponding SCSI changes are done. 
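In the qlt_pci_map_calc_cnt() hunk above, the DIN_INSERT/DOUT_STRIP cases re-derive the segment count as DIV_ROUND_UP(bufflen, blk_sz) because each logical block contributes one 8-byte protection tuple that does not arrive with the data. A standalone worked example of that arithmetic, with invented sample sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            uint32_t bufflen = 32 * 1024;   /* 32 KiB of data */
            uint32_t blk_sz = 512;          /* logical block size */
            uint32_t seg_cnt = DIV_ROUND_UP(bufflen, blk_sz);
            uint32_t dif_bytes = seg_cnt * 8;

            /* 64 blocks -> 64 tuples -> 512 bytes of protection data */
            printf("seg_cnt=%u dif_bytes=%u\n", seg_cnt, dif_bytes);
            return 0;
    }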
+ * + if (!sp->cmd->prot_chk) + return 0; + * + */ + switch (se_cmd->prot_op) { + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_STRIP: + if (ql2xenablehba_err_chk >= 1) + return 1; + break; + case TARGET_PROT_DOUT_PASS: + case TARGET_PROT_DIN_PASS: + if (ql2xenablehba_err_chk >= 2) + return 1; + break; + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_STRIP: + return 1; + default: + break; + } + return 0; +} + +/* + * qlt_set_t10dif_tags - Extract Ref and App tags from SCSI command + * + */ +static inline void +qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) +{ + uint32_t lba = 0xffffffff & se_cmd->t_task_lba; + + /* wait till Mode Sense/Select cmd, modepage Ah, subpage 2 + * have been implemented by TCM, before AppTag is available. + * Look for modesense_handlers[] + */ + ctx->app_tag = 0; + ctx->app_tag_mask[0] = 0x0; + ctx->app_tag_mask[1] = 0x0; + + switch (se_cmd->prot_type) { + case TARGET_DIF_TYPE0_PROT: + /* + * No check for ql2xenablehba_err_chk, as it would be an + * I/O error if hba tag generation is not done. + */ + ctx->ref_tag = cpu_to_le32(lba); + + if (!qlt_hba_err_chk_enabled(se_cmd)) + break; + + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + /* + * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and + * 16 bit app tag. + */ + case TARGET_DIF_TYPE1_PROT: + ctx->ref_tag = cpu_to_le32(lba); + + if (!qlt_hba_err_chk_enabled(se_cmd)) + break; + + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + /* + * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to + * match LBA in CDB + N + */ + case TARGET_DIF_TYPE2_PROT: + ctx->ref_tag = cpu_to_le32(lba); + + if (!qlt_hba_err_chk_enabled(se_cmd)) + break; + + /* enable ALL bytes of the ref tag */ + ctx->ref_tag_mask[0] = 0xff; + ctx->ref_tag_mask[1] = 0xff; + ctx->ref_tag_mask[2] = 0xff; + ctx->ref_tag_mask[3] = 0xff; + break; + + /* For Type 3 protection: 16 bit GUARD only */ + case TARGET_DIF_TYPE3_PROT: + ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = + ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; + break; + } +} + + +static inline int +qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) +{ + uint32_t *cur_dsd; + int sgc; + uint32_t transfer_length = 0; + uint32_t data_bytes; + uint32_t dif_bytes; + uint8_t bundling = 1; + uint8_t *clr_ptr; + struct crc_context *crc_ctx_pkt = NULL; + struct qla_hw_data *ha; + struct ctio_crc2_to_fw *pkt; + dma_addr_t crc_ctx_dma; + uint16_t fw_prot_opts = 0; + struct qla_tgt_cmd *cmd = prm->cmd; + struct se_cmd *se_cmd = &cmd->se_cmd; + uint32_t h; + struct atio_from_isp *atio = &prm->cmd->atio; + uint16_t t16; + + sgc = 0; + ha = vha->hw; + + pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr; + prm->pkt = pkt; + memset(pkt, 0, sizeof(*pkt)); + + ql_dbg(ql_dbg_tgt, vha, 0xe071, + "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", + vha->vp_idx, __func__, se_cmd, se_cmd->prot_op, + prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); + + if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) || + (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP)) + bundling = 0; + + /* Compute dif len and adjust data len to include protection */ + data_bytes = cmd->bufflen; + dif_bytes = (data_bytes / cmd->blk_sz) * 8; + + switch (se_cmd->prot_op) { + case 
TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_STRIP: + transfer_length = data_bytes; + data_bytes += dif_bytes; + break; + + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_INSERT: + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + transfer_length = data_bytes + dif_bytes; + break; + + default: + BUG(); + break; + } + + if (!qlt_hba_err_chk_enabled(se_cmd)) + fw_prot_opts |= 0x10; /* Disable Guard tag checking */ + /* HBA error checking enabled */ + else if (IS_PI_UNINIT_CAPABLE(ha)) { + if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || + (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) + fw_prot_opts |= PO_DIS_VALD_APP_ESC; + else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) + fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; + } + + switch (se_cmd->prot_op) { + case TARGET_PROT_DIN_INSERT: + case TARGET_PROT_DOUT_INSERT: + fw_prot_opts |= PO_MODE_DIF_INSERT; + break; + case TARGET_PROT_DIN_STRIP: + case TARGET_PROT_DOUT_STRIP: + fw_prot_opts |= PO_MODE_DIF_REMOVE; + break; + case TARGET_PROT_DIN_PASS: + case TARGET_PROT_DOUT_PASS: + fw_prot_opts |= PO_MODE_DIF_PASS; + /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ + break; + default:/* Normal Request */ + fw_prot_opts |= PO_MODE_DIF_PASS; + break; + } + + + /* ---- PKT ---- */ + /* Update entry type to indicate Command Type CRC_2 IOCB */ + pkt->entry_type = CTIO_CRC2; + pkt->entry_count = 1; + pkt->vp_index = vha->vp_idx; + + h = qlt_make_handle(vha); + if (unlikely(h == QLA_TGT_NULL_HANDLE)) { + /* + * CTIO type 7 from the firmware doesn't provide a way to + * know the initiator's LOOP ID, hence we can't find + * the session and, so, the command. + */ + return -EAGAIN; + } else + ha->tgt.cmds[h-1] = prm->cmd; + + + pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; + pkt->nport_handle = prm->cmd->loop_id; + pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); + pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; + pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; + pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + pkt->exchange_addr = atio->u.isp24.exchange_addr; + + /* silence compile warning */ + t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + pkt->ox_id = cpu_to_le16(t16); + + t16 = (atio->u.isp24.attr << 9); + pkt->flags |= cpu_to_le16(t16); + pkt->relative_offset = cpu_to_le32(prm->cmd->offset); + + /* Set transfer direction */ + if (cmd->dma_data_direction == DMA_TO_DEVICE) + pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN); + else if (cmd->dma_data_direction == DMA_FROM_DEVICE) + pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT); + + + pkt->dseg_count = prm->tot_dsds; + /* Fibre channel byte count */ + pkt->transfer_length = cpu_to_le32(transfer_length); + + + /* ----- CRC context -------- */ + + /* Allocate CRC context from global pool */ + crc_ctx_pkt = cmd->ctx = + dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); + + if (!crc_ctx_pkt) + goto crc_queuing_error; + + /* Zero out CTX area. 
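The prot_op switch earlier in this function splits the byte accounting in two: transfer_length is what the IOCB advertises as the Fibre Channel transfer, while the adjusted data_bytes covers what the DMA engines actually move, one 8-byte tuple per block included. A standalone sketch that mirrors only the arithmetic of the hunk, with invented sizes; the direction semantics remain the driver's business:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t data_bytes = 4096, blk_sz = 512;
            uint32_t dif_bytes = (data_bytes / blk_sz) * 8;     /* 64 */
            uint32_t transfer_length;
            int hba_inserts_or_strips = 1;  /* DIN_INSERT / DOUT_STRIP case */

            if (hba_inserts_or_strips) {
                    transfer_length = data_bytes;   /* tuples not on the wire */
                    data_bytes += dif_bytes;        /* but the DMA still moves them */
            } else {
                    transfer_length = data_bytes + dif_bytes;   /* pass/other modes */
            }
            printf("fc=%u dma=%u\n", transfer_length, data_bytes);
            return 0;
    }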
*/ + clr_ptr = (uint8_t *)crc_ctx_pkt; + memset(clr_ptr, 0, sizeof(*crc_ctx_pkt)); + + crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma; + INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); + + /* Set handle */ + crc_ctx_pkt->handle = pkt->handle; + + qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); + + pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); + pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); + pkt->crc_context_len = CRC_CONTEXT_LEN_FW; + + + if (!bundling) { + cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; + } else { + /* + * Configure Bundling if we need to fetch interlaving + * protection PCI accesses + */ + fw_prot_opts |= PO_ENABLE_DIF_BUNDLING; + crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); + crc_ctx_pkt->u.bundling.dseg_count = + cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); + cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address; + } + + /* Finish the common fields of CRC pkt */ + crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz); + crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); + crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); + crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); + + + /* Walks data segments */ + pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR); + + if (!bundling && prm->prot_seg_cnt) { + if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, + prm->tot_dsds, cmd)) + goto crc_queuing_error; + } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, + (prm->tot_dsds - prm->prot_seg_cnt), cmd)) + goto crc_queuing_error; + + if (bundling && prm->prot_seg_cnt) { + /* Walks dif segments */ + pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA; + + cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; + if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, + prm->prot_seg_cnt, cmd)) + goto crc_queuing_error; + } + return QLA_SUCCESS; + +crc_queuing_error: + /* Cleanup will be performed by the caller */ + + return QLA_FUNCTION_FAILED; +} + + /* * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and * * QLA_TGT_XMIT_STATUS for >= 24xx silicon @@ -1921,9 +2294,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, qlt_check_srr_debug(cmd, &xmit_type); ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018, - "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, " - "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? - 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction); + "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n", + (xmit_type & QLA_TGT_XMIT_STATUS) ? 
+ 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, + &cmd->se_cmd); res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); @@ -1941,7 +2315,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (unlikely(res)) goto out_unmap_unlock; - res = qlt_24xx_build_ctio_pkt(&prm, vha); + if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA)) + res = qlt_build_ctio_crc2_pkt(&prm, vha); + else + res = qlt_24xx_build_ctio_pkt(&prm, vha); if (unlikely(res != 0)) goto out_unmap_unlock; @@ -1953,7 +2330,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN | CTIO7_FLAGS_STATUS_MODE_0); - qlt_load_data_segments(&prm, vha); + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) + qlt_load_data_segments(&prm, vha); if (prm.add_status_pkt == 0) { if (xmit_type & QLA_TGT_XMIT_STATUS) { @@ -1983,8 +2361,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, ql_dbg(ql_dbg_tgt, vha, 0xe019, "Building additional status packet\n"); + /* + * T10Dif: ctio_crc2_to_fw overlay on top of + * ctio7_to_24xx + */ memcpy(ctio, pkt, sizeof(*ctio)); + /* reset back to CTIO7 */ ctio->entry_count = 1; + ctio->entry_type = CTIO_TYPE7; ctio->dseg_count = 0; ctio->u.status1.flags &= ~__constant_cpu_to_le16( CTIO7_FLAGS_DATA_IN); @@ -1993,6 +2377,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK; pkt->u.status0.flags |= __constant_cpu_to_le16( CTIO7_FLAGS_DONT_RET_CTIO); + + /* qlt_24xx_init_ctio_to_isp will correct + * all necessary fields that are part of CTIO7. + * There should be no residual of CTIO-CRC2 data. + */ qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio, &prm); pr_debug("Status CTIO7: %p\n", ctio); @@ -2041,8 +2430,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) return -EIO; - ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)", - (int)vha->vp_idx); + ql_dbg(ql_dbg_tgt, vha, 0xe01b, + "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n", + __func__, (int)vha->vp_idx, &cmd->se_cmd, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); /* Calculate number of entries and segments required */ if (qlt_pci_map_calc_cnt(&prm) != 0) @@ -2054,14 +2445,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) res = qlt_check_reserve_free_req(vha, prm.req_cnt); if (res != 0) goto out_unlock_free_unmap; + if (cmd->se_cmd.prot_op) + res = qlt_build_ctio_crc2_pkt(&prm, vha); + else + res = qlt_24xx_build_ctio_pkt(&prm, vha); - res = qlt_24xx_build_ctio_pkt(&prm, vha); if (unlikely(res != 0)) goto out_unlock_free_unmap; pkt = (struct ctio7_to_24xx *)prm.pkt; pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | CTIO7_FLAGS_STATUS_MODE_0); - qlt_load_data_segments(&prm, vha); + + if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) + qlt_load_data_segments(&prm, vha); cmd->state = QLA_TGT_STATE_NEED_DATA; @@ -2079,6 +2475,143 @@ out_unlock_free_unmap: } EXPORT_SYMBOL(qlt_rdy_to_xfer); + +/* + * Checks the guard or meta-data for the type of error + * detected by the HBA. 
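qlt_handle_dif_error(), which opens just below, unpacks the two 8-byte tuples the firmware reports (actual and expected) with big-endian fields laid out guard(16), app tag(16), ref tag(32), then compares guard first, ref tag second, app tag last. A standalone sketch of that decode order, with made-up tuple bytes:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t get_be16(const uint8_t *p)
    {
            return (uint16_t)(p[0] << 8 | p[1]);
    }

    static uint32_t get_be32(const uint8_t *p)
    {
            return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                   (uint32_t)p[2] << 8 | p[3];
    }

    int main(void)
    {
            /* guard | app tag | ref tag; sample tuples differing in guard */
            const uint8_t exp[8] = { 0xab, 0xcd, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00 };
            const uint8_t act[8] = { 0xab, 0xce, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00 };

            if (get_be16(act) != get_be16(exp))
                    printf("guard mismatch: 0x%x vs 0x%x\n",
                           get_be16(act), get_be16(exp));
            else if (get_be32(act + 4) != get_be32(exp + 4))
                    printf("ref tag mismatch\n");
            else if (get_be16(act + 2) != get_be16(exp + 2))
                    printf("app tag mismatch\n");
            else
                    printf("tuples match\n");
            return 0;
    }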
+ */ +static inline int +qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, + struct ctio_crc_from_fw *sts) +{ + uint8_t *ap = &sts->actual_dif[0]; + uint8_t *ep = &sts->expected_dif[0]; + uint32_t e_ref_tag, a_ref_tag; + uint16_t e_app_tag, a_app_tag; + uint16_t e_guard, a_guard; + uint64_t lba = cmd->se_cmd.t_task_lba; + + a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); + a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); + a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); + + e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); + e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); + e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); + + ql_dbg(ql_dbg_tgt, vha, 0xe075, + "iocb(s) %p Returned STATUS.\n", sts); + + ql_dbg(ql_dbg_tgt, vha, 0xf075, + "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", + cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, + a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); + + /* + * Ignore sector if: + * For type 3: ref & app tag is all 'f's + * For type 0,1,2: app tag is all 'f's + */ + if ((a_app_tag == 0xffff) && + ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || + (a_ref_tag == 0xffffffff))) { + uint32_t blocks_done; + + /* 2TB boundary case covered automatically with this */ + blocks_done = e_ref_tag - (uint32_t)lba + 1; + cmd->se_cmd.bad_sector = e_ref_tag; + cmd->se_cmd.pi_err = 0; + ql_dbg(ql_dbg_tgt, vha, 0xf074, + "need to return scsi good\n"); + + /* Update protection tag */ + if (cmd->prot_sg_cnt) { + uint32_t i, j = 0, k = 0, num_ent; + struct scatterlist *sg, *sgl; + + + sgl = cmd->prot_sg; + + /* Patch the corresponding protection tags */ + for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { + num_ent = sg_dma_len(sg) / 8; + if (k + num_ent < blocks_done) { + k += num_ent; + continue; + } + j = blocks_done - k - 1; + k = blocks_done; + break; + } + + if (k != blocks_done) { + ql_log(ql_log_warn, vha, 0xf076, + "unexpected tag values tag:lba=%u:%llu)\n", + e_ref_tag, (unsigned long long)lba); + goto out; + } + +#if 0 + struct sd_dif_tuple *spt; + /* TODO: + * This section came from initiator. Is it valid here? + * should ulp be override with actual val??? 
+ */ + spt = page_address(sg_page(sg)) + sg->offset; + spt += j; + + spt->app_tag = 0xffff; + if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) + spt->ref_tag = 0xffffffff; +#endif + } + + return 0; + } + + /* check guard */ + if (e_guard != a_guard) { + cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; + cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; + + ql_log(ql_log_warn, vha, 0xe076, + "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", + cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, + a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, + a_guard, e_guard, cmd); + goto out; + } + + /* check ref tag */ + if (e_ref_tag != a_ref_tag) { + cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; + cmd->se_cmd.bad_sector = e_ref_tag; + + ql_log(ql_log_warn, vha, 0xe077, + "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", + cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, + a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, + a_guard, e_guard, cmd); + goto out; + } + + /* check appl tag */ + if (e_app_tag != a_app_tag) { + cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; + cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; + + ql_log(ql_log_warn, vha, 0xe078, + "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", + cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, + a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, + a_guard, e_guard, cmd); + goto out; + } +out: + return 1; +} + + /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, @@ -2089,6 +2622,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; request_t *pkt; int ret = 0; + uint16_t temp; ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); @@ -2125,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); - ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); + temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); + ctio24->u.status1.ox_id = cpu_to_le16(temp); /* Most likely, it isn't needed */ ctio24->u.status1.residual = get_unaligned((uint32_t *) @@ -2155,21 +2690,46 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, rc = __qlt_send_term_exchange(vha, cmd, atio); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); done: - if (rc == 1) { + /* + * Terminate exchange will tell fw to release any active CTIO + * that's in FW posession and cleanup the exchange. + * + * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still + * down at FW. Free the cmd later when CTIO comes back later + * w/aborted(0x2) status. + * + * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already + * back w/some err. Free the cmd now. 
+ */ + if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) { if (!ha_locked && !in_interrupt()) msleep(250); /* just in case */ + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); vha->hw->tgt.tgt_ops->free_cmd(cmd); } + return; } void qlt_free_cmd(struct qla_tgt_cmd *cmd) { - BUG_ON(cmd->sg_mapped); + struct qla_tgt_sess *sess = cmd->sess; + ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, + "%s: se_cmd[%p] ox_id %04x\n", + __func__, &cmd->se_cmd, + be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + + BUG_ON(cmd->sg_mapped); if (unlikely(cmd->free_sg)) kfree(cmd->sg); - kmem_cache_free(qla_tgt_cmd_cachep, cmd); + + if (!sess || !sess->se_sess) { + WARN_ON(1); + return; + } + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); } EXPORT_SYMBOL(qlt_free_cmd); @@ -2374,6 +2934,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, case CTIO_LIP_RESET: case CTIO_TARGET_RESET: case CTIO_ABORTED: + /* driver request abort via Terminate exchange */ case CTIO_TIMEOUT: case CTIO_INVALID_RX_ID: /* They are OK */ @@ -2404,18 +2965,58 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, else return; + case CTIO_DIF_ERROR: { + struct ctio_crc_from_fw *crc = + (struct ctio_crc_from_fw *)ctio; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, + "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", + vha->vp_idx, status, cmd->state, se_cmd, + *((u64 *)&crc->actual_dif[0]), + *((u64 *)&crc->expected_dif[0])); + + if (qlt_handle_dif_error(vha, cmd, ctio)) { + if (cmd->state == QLA_TGT_STATE_NEED_DATA) { + /* scsi Write/xfer rdy complete */ + goto skip_term; + } else { + /* scsi read/xmit respond complete + * call handle dif to send scsi status + * rather than terminate exchange. + */ + cmd->state = QLA_TGT_STATE_PROCESSED; + ha->tgt.tgt_ops->handle_dif_err(cmd); + return; + } + } else { + /* Need to generate a SCSI good completion. + * because FW did not send scsi status. + */ + status = 0; + goto skip_term; + } + break; + } default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, - "qla_target(%d): CTIO with error status " - "0x%x received (state %x, se_cmd %p\n", + "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", vha->vp_idx, status, cmd->state, se_cmd); break; } - if (cmd->state != QLA_TGT_STATE_NEED_DATA) + + /* "cmd->state == QLA_TGT_STATE_ABORTED" means + * cmd is already aborted/terminated, we don't + * need to terminate again. The exchange is already + * cleaned up/freed at FW level. Just cleanup at driver + * level. 
+ */ + if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && + (cmd->state != QLA_TGT_STATE_ABORTED)) { if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) return; + } } +skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); @@ -2444,7 +3045,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, "not return a CTIO complete\n", vha->vp_idx, cmd->state); } - if (unlikely(status != CTIO_SUCCESS)) { + if (unlikely(status != CTIO_SUCCESS) && + (cmd->state != QLA_TGT_STATE_ABORTED)) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n"); dump_stack(); } @@ -2489,13 +3091,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *, /* * Process context for I/O path into tcm_qla2xxx code */ -static void qlt_do_work(struct work_struct *work) +static void __qlt_do_work(struct qla_tgt_cmd *cmd) { - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); scsi_qla_host_t *vha = cmd->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; - struct qla_tgt_sess *sess = NULL; + struct qla_tgt_sess *sess = cmd->sess; struct atio_from_isp *atio = &cmd->atio; unsigned char *cdb; unsigned long flags; @@ -2505,41 +3106,6 @@ static void qlt_do_work(struct work_struct *work) if (tgt->tgt_stop) goto out_term; - spin_lock_irqsave(&ha->hardware_lock, flags); - sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, - atio->u.isp24.fcp_hdr.s_id); - /* Do kref_get() before dropping qla_hw_data->hardware_lock. */ - if (sess) - kref_get(&sess->se_sess->sess_kref); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - - if (unlikely(!sess)) { - uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id; - - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, - "qla_target(%d): Unable to find wwn login" - " (s_id %x:%x:%x), trying to create it manually\n", - vha->vp_idx, s_id[0], s_id[1], s_id[2]); - - if (atio->u.raw.entry_count > 1) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, - "Dropping multy entry cmd %p\n", cmd); - goto out_term; - } - - mutex_lock(&vha->vha_tgt.tgt_mutex); - sess = qlt_make_local_sess(vha, s_id); - /* sess has an extra creation ref. 
*/ - mutex_unlock(&vha->vha_tgt.tgt_mutex); - - if (!sess) - goto out_term; - } - - cmd->sess = sess; - cmd->loop_id = sess->loop_id; - cmd->conf_compl_supported = sess->conf_compl_supported; - cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( @@ -2563,11 +3129,12 @@ static void qlt_do_work(struct work_struct *work) atio->u.isp24.fcp_cmnd.add_cdb_len])); ql_dbg(ql_dbg_tgt, vha, 0xe022, - "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n", - cmd, cmd->unpacked_lun, cmd->tag); + "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n", + cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length, + cmd->atio.u.isp24.fcp_hdr.ox_id); - ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, - fcp_task_attr, data_dir, bidi); + ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, + fcp_task_attr, data_dir, bidi); if (ret != 0) goto out_term; /* @@ -2586,17 +3153,114 @@ out_term: */ spin_lock_irqsave(&ha->hardware_lock, flags); qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); - kmem_cache_free(qla_tgt_cmd_cachep, cmd); - if (sess) + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); + ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); +} + +static void qlt_do_work(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + + __qlt_do_work(cmd); +} + +static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, + struct qla_tgt_sess *sess, + struct atio_from_isp *atio) +{ + struct se_session *se_sess = sess->se_sess; + struct qla_tgt_cmd *cmd; + int tag; + + tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); + if (tag < 0) + return NULL; + + cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; + memset(cmd, 0, sizeof(struct qla_tgt_cmd)); + + memcpy(&cmd->atio, atio, sizeof(*atio)); + cmd->state = QLA_TGT_STATE_NEW; + cmd->tgt = vha->vha_tgt.qla_tgt; + cmd->vha = vha; + cmd->se_cmd.map_tag = tag; + cmd->sess = sess; + cmd->loop_id = sess->loop_id; + cmd->conf_compl_supported = sess->conf_compl_supported; + + return cmd; +} + +static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *, + uint16_t); + +static void qlt_create_sess_from_atio(struct work_struct *work) +{ + struct qla_tgt_sess_op *op = container_of(work, + struct qla_tgt_sess_op, work); + scsi_qla_host_t *vha = op->vha; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess; + struct qla_tgt_cmd *cmd; + unsigned long flags; + uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, + "qla_target(%d): Unable to find wwn login" + " (s_id %x:%x:%x), trying to create it manually\n", + vha->vp_idx, s_id[0], s_id[1], s_id[2]); + + if (op->atio.u.raw.entry_count > 1) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, + "Dropping multy entry atio %p\n", &op->atio); + goto out_term; + } + + mutex_lock(&vha->vha_tgt.tgt_mutex); + sess = qlt_make_local_sess(vha, s_id); + /* sess has an extra creation ref. */ + mutex_unlock(&vha->vha_tgt.tgt_mutex); + + if (!sess) + goto out_term; + /* + * Now obtain a pre-allocated session tag using the original op->atio + * packet header, and dispatch into __qlt_do_work() using the existing + * process context. 
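qlt_get_tag() above replaces the old per-command kmem_cache with tags drawn from the session's preallocated sess_cmd_map, so command memory is bounded per session and allocation stays lock-light. A hedged sketch of the pattern; the ex_ names are invented and error handling is trimmed:

    #include <linux/percpu_ida.h>
    #include <linux/sched.h>
    #include <target/target_core_base.h>

    struct ex_cmd {
            int tag;
            /* ... per-command state, zeroed on reuse ... */
    };

    static struct ex_cmd *ex_get_cmd(struct se_session *se_sess)
    {
            int tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);

            if (tag < 0)
                    return NULL;    /* pool empty: caller answers SAM_STAT_BUSY */
            return &((struct ex_cmd *)se_sess->sess_cmd_map)[tag];
    }

    static void ex_put_cmd(struct se_session *se_sess, struct ex_cmd *cmd)
    {
            percpu_ida_free(&se_sess->sess_tag_pool, cmd->tag);
    }

The free side mirrors the alloc exactly, which is why the hunks above swap every kmem_cache_free() for a percpu_ida_free() on the same pool.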
+ */ + cmd = qlt_get_tag(vha, sess, &op->atio); + if (!cmd) { + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY); ha->tgt.tgt_ops->put_sess(sess); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + kfree(op); + return; + } + /* + * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release + * the extra reference taken above by qlt_make_local_sess() + */ + __qlt_do_work(cmd); + kfree(op); + return; + +out_term: + spin_lock_irqsave(&ha->hardware_lock, flags); + qlt_send_term_exchange(vha, NULL, &op->atio, 1); spin_unlock_irqrestore(&ha->hardware_lock, flags); + kfree(op); + } /* ha->hardware_lock supposed to be held on entry */ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, struct atio_from_isp *atio) { + struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_tgt_sess *sess; struct qla_tgt_cmd *cmd; if (unlikely(tgt->tgt_stop)) { @@ -2605,18 +3269,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -EFAULT; } - cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); + if (unlikely(!sess)) { + struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), + GFP_ATOMIC); + if (!op) + return -ENOMEM; + + memcpy(&op->atio, atio, sizeof(*atio)); + INIT_WORK(&op->work, qlt_create_sess_from_atio); + queue_work(qla_tgt_wq, &op->work); + return 0; + } + /* + * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. + */ + kref_get(&sess->se_sess->sess_kref); + + cmd = qlt_get_tag(vha, sess, atio); if (!cmd) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); + ha->tgt.tgt_ops->put_sess(sess); return -ENOMEM; } - memcpy(&cmd->atio, atio, sizeof(*atio)); - cmd->state = QLA_TGT_STATE_NEW; - cmd->tgt = vha->vha_tgt.qla_tgt; - cmd->vha = vha; - INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; @@ -3527,11 +4204,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, switch (atio->u.raw.entry_type) { case ATIO_TYPE7: ql_dbg(ql_dbg_tgt, vha, 0xe02d, - "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " - "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", + "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n", vha->vp_idx, atio->u.isp24.fcp_cmnd.lun, atio->u.isp24.fcp_cmnd.rddata, atio->u.isp24.fcp_cmnd.wrdata, + atio->u.isp24.fcp_cmnd.cdb[0], atio->u.isp24.fcp_cmnd.add_cdb_len, be32_to_cpu(get_unaligned((uint32_t *) &atio->u.isp24.fcp_cmnd.add_cdb[ @@ -3629,11 +4306,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) tgt->irq_cmd_count++; switch (pkt->entry_type) { + case CTIO_CRC2: case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; - ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", - vha->vp_idx); + ql_dbg(ql_dbg_tgt, vha, 0xe030, + "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n", + entry->entry_type, vha->vp_idx); qlt_do_ctio_completion(vha, entry->handle, le16_to_cpu(entry->status)|(pkt->entry_status << 16), entry); @@ -4768,6 +5447,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha, case ABTS_RESP_24XX: case CTIO_TYPE7: case NOTIFY_ACK_TYPE: + case CTIO_CRC2: return 1; default: return 0; @@ -4911,23 +5591,13 @@ int __init qlt_init(void) if (!QLA_TGT_MODE_ENABLED()) return 0; - qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep", - sizeof(struct 
qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0, - NULL); - if (!qla_tgt_cmd_cachep) { - ql_log(ql_log_fatal, NULL, 0xe06c, - "kmem_cache_create for qla_tgt_cmd_cachep failed\n"); - return -ENOMEM; - } - qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep", sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct qla_tgt_mgmt_cmd), 0, NULL); if (!qla_tgt_mgmt_cmd_cachep) { ql_log(ql_log_fatal, NULL, 0xe06d, "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); - ret = -ENOMEM; - goto out; + return -ENOMEM; } qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, @@ -4955,8 +5625,6 @@ out_cmd_mempool: mempool_destroy(qla_tgt_mgmt_cmd_mempool); out_mgmt_cmd_cachep: kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); -out: - kmem_cache_destroy(qla_tgt_cmd_cachep); return ret; } @@ -4968,5 +5636,4 @@ void qlt_exit(void) destroy_workqueue(qla_tgt_wq); mempool_destroy(qla_tgt_mgmt_cmd_mempool); kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); - kmem_cache_destroy(qla_tgt_cmd_cachep); } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index ce33d8c26406..d1d24fb0160a 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -293,6 +293,7 @@ struct ctio_to_2xxx { #define CTIO_ABORTED 0x02 #define CTIO_INVALID_RX_ID 0x08 #define CTIO_TIMEOUT 0x0B +#define CTIO_DIF_ERROR 0x0C /* DIF error detected */ #define CTIO_LIP_RESET 0x0E #define CTIO_TARGET_RESET 0x17 #define CTIO_PORT_UNAVAILABLE 0x28 @@ -315,7 +316,7 @@ struct fcp_hdr { uint8_t seq_id; uint8_t df_ctl; uint16_t seq_cnt; - uint16_t ox_id; + __be16 ox_id; uint16_t rx_id; uint32_t parameter; } __packed; @@ -440,9 +441,9 @@ struct ctio7_to_24xx { union { struct { uint16_t reserved1; - uint16_t flags; + __le16 flags; uint32_t residual; - uint16_t ox_id; + __le16 ox_id; uint16_t scsi_status; uint32_t relative_offset; uint32_t reserved2; @@ -457,7 +458,7 @@ struct ctio7_to_24xx { uint16_t sense_length; uint16_t flags; uint32_t residual; - uint16_t ox_id; + __le16 ox_id; uint16_t scsi_status; uint16_t response_len; uint16_t reserved; @@ -498,11 +499,12 @@ struct ctio7_from_24xx { #define CTIO7_FLAGS_DONT_RET_CTIO BIT_8 #define CTIO7_FLAGS_STATUS_MODE_0 0 #define CTIO7_FLAGS_STATUS_MODE_1 BIT_6 +#define CTIO7_FLAGS_STATUS_MODE_2 BIT_7 #define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5 #define CTIO7_FLAGS_CONFIRM_SATISF BIT_4 #define CTIO7_FLAGS_DSD_PTR BIT_2 -#define CTIO7_FLAGS_DATA_IN BIT_1 -#define CTIO7_FLAGS_DATA_OUT BIT_0 +#define CTIO7_FLAGS_DATA_IN BIT_1 /* data to initiator */ +#define CTIO7_FLAGS_DATA_OUT BIT_0 /* data from initiator */ #define ELS_PLOGI 0x3 #define ELS_FLOGI 0x4 @@ -514,6 +516,68 @@ struct ctio7_from_24xx { #define ELS_ADISC 0x52 /* + *CTIO Type CRC_2 IOCB + */ +struct ctio_crc2_to_fw { + uint8_t entry_type; /* Entry type. */ +#define CTIO_CRC2 0x7A + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint16_t nport_handle; /* N_PORT handle. */ + __le16 timeout; /* Command timeout. */ + + uint16_t dseg_count; /* Data segment count. 
*/ + uint8_t vp_index; + uint8_t add_flags; /* additional flags */ +#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 + + uint8_t initiator_id[3]; /* initiator ID */ + uint8_t reserved1; + uint32_t exchange_addr; /* rcv exchange address */ + uint16_t reserved2; + __le16 flags; /* refer to CTIO7 flags values */ + uint32_t residual; + __le16 ox_id; + uint16_t scsi_status; + __le32 relative_offset; + uint32_t reserved5; + __le32 transfer_length; /* total fc transfer length */ + uint32_t reserved6; + __le32 crc_context_address[2];/* Data segment address. */ + uint16_t crc_context_len; /* Data segment length. */ + uint16_t reserved_1; /* MUST be set to 0. */ +} __packed; + +/* CTIO Type CRC_x Status IOCB */ +struct ctio_crc_from_fw { + uint8_t entry_type; /* Entry type. */ + uint8_t entry_count; /* Entry count. */ + uint8_t sys_define; /* System defined. */ + uint8_t entry_status; /* Entry Status. */ + + uint32_t handle; /* System handle. */ + uint16_t status; + uint16_t timeout; /* Command timeout. */ + uint16_t dseg_count; /* Data segment count. */ + uint32_t reserved1; + uint16_t state_flags; +#define CTIO_CRC_SF_DIF_CHOPPED BIT_4 + + uint32_t exchange_address; /* rcv exchange address */ + uint16_t reserved2; + uint16_t flags; + uint32_t resid_xfer_length; + uint16_t ox_id; + uint8_t reserved3[12]; + uint16_t runt_guard; /* reported runt blk guard */ + uint8_t actual_dif[8]; + uint8_t expected_dif[8]; +} __packed; + +/* * ISP queue - ABTS received/response entries structure definition for 24xx. */ #define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */ @@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl { int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, unsigned char *, uint32_t, int, int, int); void (*handle_data)(struct qla_tgt_cmd *); + void (*handle_dif_err)(struct qla_tgt_cmd *); int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t, uint32_t); void (*free_cmd)(struct qla_tgt_cmd *); @@ -805,6 +870,12 @@ struct qla_tgt { struct list_head tgt_list_entry; }; +struct qla_tgt_sess_op { + struct scsi_qla_host *vha; + struct atio_from_isp atio; + struct work_struct work; +}; + /* * Equivilant to IT Nexus (Initiator-Target) */ @@ -829,9 +900,9 @@ struct qla_tgt_sess { }; struct qla_tgt_cmd { + struct se_cmd se_cmd; struct qla_tgt_sess *sess; int state; - struct se_cmd se_cmd; struct work_struct free_work; struct work_struct work; /* Sense buffer that will be mapped into outgoing status */ @@ -843,6 +914,7 @@ struct qla_tgt_cmd { unsigned int free_sg:1; unsigned int aborted:1; /* Needed in case of SRR */ unsigned int write_data_transferred:1; + unsigned int ctx_dsd_alloced:1; struct scatterlist *sg; /* cmd data buffer SG vector */ int sg_cnt; /* SG segments count */ @@ -857,6 +929,12 @@ struct qla_tgt_cmd { struct scsi_qla_host *vha; struct atio_from_isp atio; + /* t10dif */ + struct scatterlist *prot_sg; + uint32_t prot_sg_cnt; + uint32_t blk_sz; + struct crc_context *ctx; + }; struct qla_tgt_sess_work_param { @@ -901,6 +979,10 @@ struct qla_tgt_prm { int sense_buffer_len; int residual; int add_status_pkt; + /* dif */ + struct scatterlist *prot_sg; + uint16_t prot_seg_cnt; + uint16_t tot_dsds; }; struct qla_tgt_srr_imm { @@ -976,6 +1058,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *, extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); +extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *); +extern int qlt_xmit_response_dif(struct 
qla_tgt_cmd *, int, uint8_t); extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index a804e9b744bb..cb9a0c4bc419 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ @@ -201,7 +201,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg, ql_dbg(ql_dbg_misc, NULL, 0xd014, "%s: @%x\n", __func__, offset); } - qla27xx_insert32(offset, buf, len); qla27xx_read32(window, buf, len); } @@ -220,7 +219,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg, static inline void qla27xx_read_window(__iomem struct device_reg_24xx *reg, - uint32_t base, uint offset, uint count, uint width, void *buf, + uint32_t addr, uint offset, uint count, uint width, void *buf, ulong *len) { void *window = (void *)reg + offset; @@ -229,14 +228,14 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg, if (buf) { ql_dbg(ql_dbg_misc, NULL, 0xd016, "%s: base=%x offset=%x count=%x width=%x\n", - __func__, base, offset, count, width); + __func__, addr, offset, count, width); } - qla27xx_write_reg(reg, IOBASE_ADDR, base, buf); + qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf); while (count--) { - qla27xx_insert32(base, buf, len); + qla27xx_insert32(addr, buf, len); readn(window, buf, len); window += width; - base += width; + addr++; } } @@ -336,7 +335,8 @@ qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, ql_dbg(ql_dbg_misc, vha, 0xd204, "%s: rdpci [%lx]\n", __func__, *len); - qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len); + qla27xx_insert32(ent->t260.pci_offset, buf, len); + qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len); return false; } @@ -349,7 +349,7 @@ qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, ql_dbg(ql_dbg_misc, vha, 0xd205, "%s: wrpci [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf); + qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf); return false; } @@ -392,9 +392,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, goto done; } - if (end < start) { + if (end < start || end == 0) { ql_dbg(ql_dbg_misc, vha, 0xd023, - "%s: bad range (start=%x end=%x)\n", __func__, + "%s: unusable range (start=%x end=%x)\n", __func__, ent->t262.end_addr, ent->t262.start_addr); qla27xx_skip_entry(ent, buf); goto done; @@ -452,17 +452,15 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, ql_dbg(ql_dbg_misc, vha, 0xd025, "%s: unsupported atio queue\n", __func__); qla27xx_skip_entry(ent, buf); - goto done; } else { ql_dbg(ql_dbg_misc, vha, 0xd026, "%s: unknown queue %u\n", __func__, ent->t263.queue_type); qla27xx_skip_entry(ent, buf); - goto done; } if (buf) ent->t263.num_queues = count; -done: + return false; } @@ -503,7 +501,7 @@ qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, ql_dbg(ql_dbg_misc, vha, 0xd209, "%s: pause risc [%lx]\n", __func__, *len); if (buf) - qla24xx_pause_risc(reg); + qla24xx_pause_risc(reg, vha->hw); return false; } @@ -590,7 +588,6 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); - void *window = (void *)reg + 0xc4; ulong dwords = ent->t270.count; ulong addr = 
ent->t270.addr; @@ -599,10 +596,9 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); while (dwords--) { qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf); - qla27xx_read_reg(reg, 0xc4, buf, len); qla27xx_insert32(addr, buf, len); - qla27xx_read32(window, buf, len); - addr++; + qla27xx_read_reg(reg, 0xc4, buf, len); + addr += sizeof(uint32_t); } return false; @@ -614,12 +610,12 @@ qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, { struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); ulong addr = ent->t271.addr; + ulong data = ent->t271.data; ql_dbg(ql_dbg_misc, vha, 0xd20f, "%s: wrremreg [%lx]\n", __func__, *len); qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); - qla27xx_read_reg(reg, 0xc4, buf, len); - qla27xx_insert32(addr, buf, len); + qla27xx_write_reg(reg, 0xc4, data, buf); qla27xx_write_reg(reg, 0xc0, addr, buf); return false; @@ -662,9 +658,59 @@ qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, "%s: failed pcicfg read at %lx\n", __func__, addr); qla27xx_insert32(addr, buf, len); qla27xx_insert32(value, buf, len); - addr += 4; + addr += sizeof(uint32_t); + } + + return false; +} + +static int +qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, + struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ + uint count = 0; + uint i; + + ql_dbg(ql_dbg_misc, vha, 0xd212, + "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len); + if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { + for (i = 0; i < vha->hw->max_req_queues; i++) { + struct req_que *req = vha->hw->req_q_map[i]; + if (req || !buf) { + qla27xx_insert16(i, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(req && req->out_ptr ? + *req->out_ptr : 0, buf, len); + count++; + } + } + } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { + for (i = 0; i < vha->hw->max_rsp_queues; i++) { + struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + if (rsp || !buf) { + qla27xx_insert16(i, buf, len); + qla27xx_insert16(1, buf, len); + qla27xx_insert32(rsp && rsp->in_ptr ? + *rsp->in_ptr : 0, buf, len); + count++; + } + } + } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { + ql_dbg(ql_dbg_misc, vha, 0xd02e, + "%s: unsupported atio queue\n", __func__); + qla27xx_skip_entry(ent, buf); + } else { + ql_dbg(ql_dbg_misc, vha, 0xd02f, + "%s: unknown queue %u\n", __func__, ent->t274.queue_type); + qla27xx_skip_entry(ent, buf); } + if (buf) + ent->t274.num_queues = count; + + if (!count) + qla27xx_skip_entry(ent, buf); + return false; } @@ -709,6 +755,7 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = { { ENTRY_TYPE_WRREMREG , qla27xx_fwdt_entry_t271 } , { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } , { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } , + { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } , { -1 , qla27xx_fwdt_entry_other } }; diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index c9d2fff4d964..1967424c8e64 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -1,6 +1,6 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. 
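New template entry types such as 274 slot into ql27xx_fwdt_entry_call_list[] above, a sentinel-terminated type-to-handler table scanned linearly at dump time. A standalone rendering of that dispatch shape; the handlers here just print:

    #include <stdio.h>

    struct entry_call {
            int type;
            int (*handler)(int type);
    };

    static int handle_known(int t) { printf("entry %d handled\n", t); return 0; }
    static int handle_other(int t) { printf("entry %d skipped\n", t); return 0; }

    static const struct entry_call call_list[] = {
            { 274, handle_known },  /* ENTRY_TYPE_GET_SHADOW in the driver */
            { -1,  handle_other },  /* sentinel: catches unknown types */
    };

    static int dispatch(int type)
    {
            const struct entry_call *e = call_list;

            while (e->type != -1 && e->type != type)
                    e++;
            return e->handler(type);
    }

    int main(void)
    {
            dispatch(274);
            dispatch(999);
            return 0;
    }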
*/ @@ -52,6 +52,7 @@ struct __packed qla27xx_fwdt_template { #define ENTRY_TYPE_WRREMREG 271 #define ENTRY_TYPE_RDREMRAM 272 #define ENTRY_TYPE_PCICFG 273 +#define ENTRY_TYPE_GET_SHADOW 274 #define CAPTURE_FLAG_PHYS_ONLY BIT_0 #define CAPTURE_FLAG_PHYS_VIRT BIT_1 @@ -109,12 +110,12 @@ struct __packed qla27xx_fwdt_entry { } t259; struct __packed { - uint8_t pci_addr; + uint8_t pci_offset; uint8_t reserved[3]; } t260; struct __packed { - uint8_t pci_addr; + uint8_t pci_offset; uint8_t reserved[3]; uint32_t write_data; } t261; @@ -186,6 +187,12 @@ struct __packed qla27xx_fwdt_entry { uint32_t addr; uint32_t count; } t273; + + struct __packed { + uint32_t num_queues; + uint8_t queue_type; + uint8_t reserved[3]; + } t274; }; }; @@ -202,4 +209,8 @@ struct __packed qla27xx_fwdt_entry { #define T268_BUF_TYPE_EXCH_BUFOFF 2 #define T268_BUF_TYPE_EXTD_LOGIN 3 +#define T274_QUEUE_TYPE_REQ_SHAD 1 +#define T274_QUEUE_TYPE_RSP_SHAD 2 +#define T274_QUEUE_TYPE_ATIO_SHAD 3 + #endif diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index e36b94712544..4d2c98cbec4f 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -1,13 +1,13 @@ /* * QLogic Fibre Channel HBA Driver - * Copyright (c) 2003-2013 QLogic Corporation + * Copyright (c) 2003-2014 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ /* * Driver version */ -#define QLA2XXX_VERSION "8.07.00.02-k" +#define QLA2XXX_VERSION "8.07.00.08-k" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 7 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 68fb66fdb757..e2beab962096 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -472,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; + cmd->prot_sg_cnt = se_cmd->t_prot_nents; + cmd->prot_sg = se_cmd->t_prot_sg; + cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; + se_cmd->pi_err = 0; + /* * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup * the SGL mappings into PCIe memory for incoming FCP WRITE data. @@ -567,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) return; } - transport_generic_request_failure(&cmd->se_cmd, - TCM_CHECK_CONDITION_ABORT_CMD); + if (cmd->se_cmd.pi_err) + transport_generic_request_failure(&cmd->se_cmd, + cmd->se_cmd.pi_err); + else + transport_generic_request_failure(&cmd->se_cmd, + TCM_CHECK_CONDITION_ABORT_CMD); + return; } @@ -584,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) queue_work(tcm_qla2xxx_free_wq, &cmd->work); } +static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) +{ + struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + + /* take an extra kref to prevent cmd free too early. + * need to wait for SCSI status/check condition to + * finish responding generate by transport_generic_request_failure. 
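The kref_get() in tcm_qla2xxx_handle_dif_work() above pins the command for the duration of the asynchronous failure callback, so the normal completion path cannot free it underneath. The general shape of that extra-reference pattern, sketched with invented names:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct ex_obj {
            struct kref kref;
            /* ... payload ... */
    };

    static void ex_release(struct kref *kref)
    {
            kfree(container_of(kref, struct ex_obj, kref));
    }

    static void ex_async_fail(struct ex_obj *obj)
    {
            kref_get(&obj->kref);   /* survive the completion we trigger */
            /* ... kick off work that may drop the original reference ... */
            kref_put(&obj->kref, ex_release);       /* balance our extra ref */
    }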
+ */ + kref_get(&cmd->se_cmd.cmd_kref); + transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); +} + +/* + * Called from qla_target.c:qlt_do_ctio_completion() + */ +static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) +{ + INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); + queue_work(tcm_qla2xxx_free_wq, &cmd->work); +} + /* * Called from qla_target.c:qlt_issue_task_mgmt() */ @@ -610,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->sg = se_cmd->t_data_sg; cmd->offset = 0; + cmd->prot_sg_cnt = se_cmd->t_prot_nents; + cmd->prot_sg = se_cmd->t_prot_sg; + cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; + se_cmd->pi_err = 0; + /* * Now queue completed DATA_IN the qla2xxx LLD and response ring */ @@ -1465,6 +1501,8 @@ static int tcm_qla2xxx_check_initiator_node_acl( struct qla_tgt_sess *sess = qla_tgt_sess; unsigned char port_name[36]; unsigned long flags; + int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count : + TCM_QLA2XXX_DEFAULT_TAGS; lport = vha->vha_tgt.target_lport_ptr; if (!lport) { @@ -1482,7 +1520,9 @@ static int tcm_qla2xxx_check_initiator_node_acl( } se_tpg = &tpg->se_tpg; - se_sess = transport_init_session(TARGET_PROT_NORMAL); + se_sess = transport_init_session_tags(num_tags, + sizeof(struct qla_tgt_cmd), + TARGET_PROT_NORMAL); if (IS_ERR(se_sess)) { pr_err("Unable to initialize struct se_session\n"); return PTR_ERR(se_sess); @@ -1600,6 +1640,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { .handle_cmd = tcm_qla2xxx_handle_cmd, .handle_data = tcm_qla2xxx_handle_data, + .handle_dif_err = tcm_qla2xxx_handle_dif_err, .handle_tmr = tcm_qla2xxx_handle_tmr, .free_cmd = tcm_qla2xxx_free_cmd, .free_mcmd = tcm_qla2xxx_free_mcmd, diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 33aaac8c7d59..10c002145648 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -4,6 +4,11 @@ #define TCM_QLA2XXX_VERSION "v0.1" /* length of ASCII WWPNs including pad */ #define TCM_QLA2XXX_NAMELEN 32 +/* + * Number of pre-allocated per-session tags, based upon the worst-case + * per port number of iocbs + */ +#define TCM_QLA2XXX_DEFAULT_TAGS 2088 #include "qla_target.h" diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c index 2eba35365920..556c1525f881 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.c +++ b/drivers/scsi/qla4xxx/ql4_83xx.c @@ -249,110 +249,6 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha) qla4_83xx_flash_unlock(ha); } -/** - * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory - * @ha: Pointer to adapter structure - * @addr: Flash address to write to - * @data: Data to be written - * @count: word_count to be written - * - * Return: On success return QLA_SUCCESS - * On error return QLA_ERROR - **/ -int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, - uint32_t *data, uint32_t count) -{ - int i, j; - uint32_t agt_ctrl; - unsigned long flags; - int ret_val = QLA_SUCCESS; - - /* Only 128-bit aligned access */ - if (addr & 0xF) { - ret_val = QLA_ERROR; - goto exit_ms_mem_write; - } - - write_lock_irqsave(&ha->hw_lock, flags); - - /* Write address */ - ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); - if (ret_val == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", - __func__); - goto exit_ms_mem_write_unlock; - } - - for (i = 0; i < count; i++, addr += 16) 
{ - if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, - QLA8XXX_ADDR_QDR_NET_MAX)) || - (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, - QLA8XXX_ADDR_DDR_NET_MAX)))) { - ret_val = QLA_ERROR; - goto exit_ms_mem_write_unlock; - } - - ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO, - addr); - /* Write data */ - ret_val |= qla4_83xx_wr_reg_indirect(ha, - MD_MIU_TEST_AGT_WRDATA_LO, - *data++); - ret_val |= qla4_83xx_wr_reg_indirect(ha, - MD_MIU_TEST_AGT_WRDATA_HI, - *data++); - ret_val |= qla4_83xx_wr_reg_indirect(ha, - MD_MIU_TEST_AGT_WRDATA_ULO, - *data++); - ret_val |= qla4_83xx_wr_reg_indirect(ha, - MD_MIU_TEST_AGT_WRDATA_UHI, - *data++); - if (ret_val == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", - __func__); - goto exit_ms_mem_write_unlock; - } - - /* Check write status */ - ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, - MIU_TA_CTL_WRITE_ENABLE); - ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, - MIU_TA_CTL_WRITE_START); - if (ret_val == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", - __func__); - goto exit_ms_mem_write_unlock; - } - - for (j = 0; j < MAX_CTL_CHECK; j++) { - ret_val = qla4_83xx_rd_reg_indirect(ha, - MD_MIU_TEST_AGT_CTRL, - &agt_ctrl); - if (ret_val == QLA_ERROR) { - ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", - __func__); - goto exit_ms_mem_write_unlock; - } - if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) - break; - } - - /* Status check failed */ - if (j >= MAX_CTL_CHECK) { - printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", - __func__); - ret_val = QLA_ERROR; - goto exit_ms_mem_write_unlock; - } - } - -exit_ms_mem_write_unlock: - write_unlock_irqrestore(&ha->hw_lock, flags); - -exit_ms_mem_write: - return ret_val; -} - #define INTENT_TO_RECOVER 0x01 #define PROCEED_TO_RECOVER 0x02 @@ -760,7 +656,7 @@ static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha) __func__)); /* 128 bit/16 byte write to MS memory */ - ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, + ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, count); if (ret_val == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n", diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h index a0de6e25ea5a..775fdf9fcc87 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.h +++ b/drivers/scsi/qla4xxx/ql4_83xx.h @@ -254,6 +254,50 @@ struct qla83xx_minidump_entry_pollrd { uint32_t rsvd_1; }; +struct qla8044_minidump_entry_rddfe { + struct qla8xxx_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t value; + uint8_t stride; + uint8_t stride2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t modify_mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + +struct qla8044_minidump_entry_rdmdio { + struct qla8xxx_minidump_entry_hdr h; + + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint8_t stride_1; + uint8_t stride_2; + uint16_t count; + uint32_t poll; + uint32_t mask; + uint32_t value_2; + uint32_t data_size; + +} __packed; + +struct qla8044_minidump_entry_pollwr { + struct qla8xxx_minidump_entry_hdr h; + uint32_t addr_1; + uint32_t addr_2; + uint32_t value_1; + uint32_t value_2; + uint32_t poll; + uint32_t mask; + uint32_t data_size; + uint32_t rsvd; + +} __packed; + /* RDMUX2 Entry */ struct qla83xx_minidump_entry_rdmux2 { struct qla8xxx_minidump_entry_hdr h; diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 
73a502288bde..8f6d0fb2cd80 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -601,6 +601,7 @@ struct scsi_qla_host { #define DPC_HA_NEED_QUIESCENT 22 /* 0x00400000 ISP-82xx only*/ #define DPC_POST_IDC_ACK 23 /* 0x00800000 */ #define DPC_RESTORE_ACB 24 /* 0x01000000 */ +#define DPC_SYSFS_DDB_EXPORT 25 /* 0x02000000 */ struct Scsi_Host *host; /* pointer to host data */ uint32_t tot_ddbs; diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 209853ce0bbc..699575efc9ba 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -1415,6 +1415,9 @@ struct ql_iscsi_stats { #define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN 16 #define QLA83XX_SS_OCM_WNDREG_INDEX 3 #define QLA83XX_SS_PCI_INDEX 0 +#define QLA8022_TEMPLATE_CAP_OFFSET 172 +#define QLA83XX_TEMPLATE_CAP_OFFSET 268 +#define QLA80XX_TEMPLATE_RESERVED_BITS 16 struct qla4_8xxx_minidump_template_hdr { uint32_t entry_type; @@ -1434,6 +1437,7 @@ struct qla4_8xxx_minidump_template_hdr { uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN]; uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN]; uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN]; + uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS]; }; #endif /* _QLA4X_FW_H */ diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index b1a19cd8d5b2..5f58b451327e 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -274,13 +274,14 @@ int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, uint32_t acb_type, uint32_t len); int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config); -int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, +int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, uint32_t *data, uint32_t count); uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state); int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config); int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config); int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha); int qla4_83xx_is_detached(struct scsi_qla_host *ha); +int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha); extern int ql4xextended_error_logging; extern int ql4xdontresethba; diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 28fbece7e08f..6f12f859b11d 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -282,6 +282,25 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha) return ipv4_wait|ipv6_wait; } +static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha, + struct qla4_8xxx_minidump_template_hdr *md_hdr) +{ + int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET : + QLA83XX_TEMPLATE_CAP_OFFSET; + int rval = 1; + uint32_t *cap_offset; + + cap_offset = (uint32_t *)((char *)md_hdr + offset); + + if (!(le32_to_cpu(*cap_offset) & BIT_0)) { + ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n", + *cap_offset); + rval = 0; + } + + return rval; +} + /** * qla4xxx_alloc_fw_dump - Allocate memory for minidump data. * @ha: pointer to host adapter structure. 
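[Annotation] The qla4_80xx_is_minidump_dma_capable() helper added above peeks at a capability bitmap stored at a fixed, chip-specific byte offset inside the minidump template (172 bytes for ISP8022, 268 for ISP83xx, per the new ql4_fw.h defines) and tests BIT_0 for PEX-DMA support. Below is a minimal user-space sketch of that probe plus the capture-mask fallback the next hunk wires in; the offsets and the 0xFF "DMA" mask come from the patch, while the buffer, helper names, and main() are illustrative assumptions only, not driver code.

	#include <stdint.h>
	#include <stdio.h>

	#define QLA8022_TEMPLATE_CAP_OFFSET	172
	#define QLA83XX_TEMPLATE_CAP_OFFSET	268
	#define CAP_BIT_PEX_DMA			0x1u

	static uint32_t le32_at(const uint8_t *base, size_t off)
	{
		/* Template fields are little-endian; assemble explicitly. */
		return (uint32_t)base[off] | (uint32_t)base[off + 1] << 8 |
		       (uint32_t)base[off + 2] << 16 |
		       (uint32_t)base[off + 3] << 24;
	}

	static int template_is_dma_capable(const uint8_t *tmpl, int is_8022)
	{
		size_t off = is_8022 ? QLA8022_TEMPLATE_CAP_OFFSET :
				       QLA83XX_TEMPLATE_CAP_OFFSET;

		return (le32_at(tmpl, off) & CAP_BIT_PEX_DMA) != 0;
	}

	/* Pick a capture mask the way the alloc_fw_dump hunk does:
	 * 0x3..0x7F are always honored, 0xFF additionally needs DMA. */
	static unsigned pick_capture_mask(unsigned requested,
					  unsigned fw_default, int dma_capable)
	{
		if ((requested >= 0x3 && requested <= 0x7F) ||
		    (requested == 0xFF && dma_capable))
			return requested;
		if (requested == 0xFF)
			fprintf(stderr,
				"falling back: PEX DMA not supported\n");
		return fw_default;
	}

	int main(void)
	{
		uint8_t tmpl[512] = { 0 };

		tmpl[QLA83XX_TEMPLATE_CAP_OFFSET] = CAP_BIT_PEX_DMA;
		printf("mask=%#x\n",
		       pick_capture_mask(0xFF, 0x1F,
					 template_is_dma_capable(tmpl, 0)));
		return 0;
	}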
@@ -294,6 +313,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) void *md_tmp; dma_addr_t md_tmp_dma; struct qla4_8xxx_minidump_template_hdr *md_hdr; + int dma_capable; if (ha->fw_dump) { ql4_printk(KERN_WARNING, ha, @@ -326,13 +346,19 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; + dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr); + capture_debug_level = md_hdr->capture_debug_level; /* Get capture mask based on module loadtime setting. */ - if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) + if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) || + (ql4xmdcapmask == 0xFF && dma_capable)) { ha->fw_dump_capture_mask = ql4xmdcapmask; - else + } else { + if (ql4xmdcapmask == 0xFF) + ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n"); ha->fw_dump_capture_mask = capture_debug_level; + } md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; @@ -864,6 +890,8 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha) if (status == QLA_SUCCESS) { if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) qla4xxx_get_crash_record(ha); + + qla4xxx_init_rings(ha); } else { DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n", ha->host_no, __func__)); diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index b1925d195f41..081b6b78d2c6 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -1526,7 +1526,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) int qla4xxx_request_irqs(struct scsi_qla_host *ha) { - int ret; + int ret = 0; int rval = QLA_ERROR; if (is_qla40XX(ha)) @@ -1580,15 +1580,13 @@ try_msi: } } - /* - * Prevent interrupts from falling back to INTx mode in cases where - * interrupts cannot get acquired through MSI-X or MSI mode. - */ +try_intx: if (is_qla8022(ha)) { - ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret); + ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n", + __func__); goto irq_not_attached; } -try_intx: + /* Trying INTx */ ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, IRQF_SHARED, DRIVER_NAME, ha); diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 0a6b782d6fdb..0a3312c6dd6d 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -2381,7 +2381,7 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config) ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__); rval = QLA_ERROR; - goto exit_config_acb; + goto exit_free_acb; } memcpy(ha->saved_acb, acb, acb_len); break; @@ -2395,8 +2395,6 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config) } memcpy(acb, ha->saved_acb, acb_len); - kfree(ha->saved_acb); - ha->saved_acb = NULL; rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); if (rval != QLA_SUCCESS) @@ -2412,6 +2410,10 @@ exit_free_acb: dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb, acb_dma); exit_config_acb: + if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) { + kfree(ha->saved_acb); + ha->saved_acb = NULL; + } DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, rval == QLA_SUCCESS ? 
"SUCCEEDED" : "FAILED")); diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 63328c812b70..9dbdb4be2d8f 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -14,6 +14,7 @@ #include <asm-generic/io-64-nonatomic-lo-hi.h> +#define TIMEOUT_100_MS 100 #define MASK(n) DMA_BIT_MASK(n) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) @@ -1176,6 +1177,112 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) return 0; } +/** + * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory + * @ha: Pointer to adapter structure + * @addr: Flash address to write to + * @data: Data to be written + * @count: word_count to be written + * + * Return: On success return QLA_SUCCESS + * On error return QLA_ERROR + **/ +int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, + uint32_t *data, uint32_t count) +{ + int i, j; + uint32_t agt_ctrl; + unsigned long flags; + int ret_val = QLA_SUCCESS; + + /* Only 128-bit aligned access */ + if (addr & 0xF) { + ret_val = QLA_ERROR; + goto exit_ms_mem_write; + } + + write_lock_irqsave(&ha->hw_lock, flags); + + /* Write address */ + ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + for (i = 0; i < count; i++, addr += 16) { + if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, + QLA8XXX_ADDR_QDR_NET_MAX)) || + (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, + QLA8XXX_ADDR_DDR_NET_MAX)))) { + ret_val = QLA_ERROR; + goto exit_ms_mem_write_unlock; + } + + ret_val = ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_ADDR_LO, + addr); + /* Write data */ + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_LO, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_HI, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_ULO, + *data++); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_WRDATA_UHI, + *data++); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + /* Check write status */ + ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_ENABLE); + ret_val |= ha->isp_ops->wr_reg_indirect(ha, + MD_MIU_TEST_AGT_CTRL, + MIU_TA_CTL_WRITE_START); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", + __func__); + goto exit_ms_mem_write_unlock; + } + + for (j = 0; j < MAX_CTL_CHECK; j++) { + ret_val = ha->isp_ops->rd_reg_indirect(ha, + MD_MIU_TEST_AGT_CTRL, + &agt_ctrl); + if (ret_val == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", + __func__); + goto exit_ms_mem_write_unlock; + } + if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) + break; + } + + /* Status check failed */ + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", + __func__); + ret_val = QLA_ERROR; + goto exit_ms_mem_write_unlock; + } + } + +exit_ms_mem_write_unlock: + write_unlock_irqrestore(&ha->hw_lock, flags); + +exit_ms_mem_write: + return ret_val; +} + static int qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) { @@ -1714,6 +1821,101 @@ void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha) 
qla4_82xx_rom_unlock(ha); } +static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha, + uint32_t addr1, uint32_t mask) +{ + unsigned long timeout; + uint32_t rval = QLA_SUCCESS; + uint32_t temp; + + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + + if (time_after_eq(jiffies, timeout)) { + ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n"); + return QLA_ERROR; + } + } while (1); + + return rval; +} + +uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1, + uint32_t addr3, uint32_t mask, uint32_t addr, + uint32_t *data_ptr) +{ + int rval = QLA_SUCCESS; + uint32_t temp; + uint32_t data; + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_rd_reg; + + temp = (0x40000000 | addr); + ha->isp_ops->wr_reg_indirect(ha, addr1, temp); + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_rd_reg; + + ha->isp_ops->rd_reg_indirect(ha, addr3, &data); + *data_ptr = data; + +exit_ipmdio_rd_reg: + return rval; +} + + +static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha, + uint32_t addr1, + uint32_t addr2, + uint32_t addr3, + uint32_t mask) +{ + unsigned long timeout; + uint32_t temp; + uint32_t rval = QLA_SUCCESS; + + timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); + do { + ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp); + if ((temp & 0x1) != 1) + break; + if (time_after_eq(jiffies, timeout)) { + ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n"); + return QLA_ERROR; + } + } while (1); + + return rval; +} + +static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha, + uint32_t addr1, uint32_t addr3, + uint32_t mask, uint32_t addr, + uint32_t value) +{ + int rval = QLA_SUCCESS; + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_wr_reg; + + ha->isp_ops->wr_reg_indirect(ha, addr3, value); + ha->isp_ops->wr_reg_indirect(ha, addr1, addr); + + rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); + if (rval) + goto exit_ipmdio_wr_reg; + +exit_ipmdio_wr_reg: + return rval; +} + static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) @@ -1822,7 +2024,7 @@ error_exit: return rval; } -static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha, +static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) { @@ -1899,11 +2101,11 @@ static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha, dma_desc.cmd.read_data_size = size; /* Prepare: Write pex-dma descriptor to MS memory. 
*/ - rval = qla4_83xx_ms_mem_write_128b(ha, + rval = qla4_8xxx_ms_mem_write_128b(ha, (uint64_t)m_hdr->desc_card_addr, (uint32_t *)&dma_desc, (sizeof(struct qla4_83xx_pex_dma_descriptor)/16)); - if (rval == -1) { + if (rval != QLA_SUCCESS) { ql4_printk(KERN_INFO, ha, "%s: Error writing rdmem-dma-init to MS !!!\n", __func__); @@ -2359,17 +2561,10 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha, uint32_t *data_ptr = *d_ptr; int rval = QLA_SUCCESS; - if (is_qla8032(ha) || is_qla8042(ha)) { - rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr, - &data_ptr); - if (rval != QLA_SUCCESS) { - rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, - &data_ptr); - } - } else { + rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, &data_ptr); - } *d_ptr = data_ptr; return rval; } @@ -2440,6 +2635,227 @@ exit_process_pollrd: return rval; } +static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int loop_cnt; + uint32_t addr1, addr2, value, data, temp, wrval; + uint8_t stride, stride2; + uint16_t count; + uint32_t poll, mask, data_size, modify_mask; + uint32_t wait_count = 0; + uint32_t *data_ptr = *d_ptr; + struct qla8044_minidump_entry_rddfe *rddfe; + uint32_t rval = QLA_SUCCESS; + + rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr; + addr1 = le32_to_cpu(rddfe->addr_1); + value = le32_to_cpu(rddfe->value); + stride = le32_to_cpu(rddfe->stride); + stride2 = le32_to_cpu(rddfe->stride2); + count = le32_to_cpu(rddfe->count); + + poll = le32_to_cpu(rddfe->poll); + mask = le32_to_cpu(rddfe->mask); + modify_mask = le32_to_cpu(rddfe->modify_mask); + data_size = le32_to_cpu(rddfe->data_size); + + addr2 = addr1 + stride; + + for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { + ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value)); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } else { + ha->isp_ops->rd_reg_indirect(ha, addr2, &temp); + temp = temp & modify_mask; + temp = (temp | ((loop_cnt << 16) | loop_cnt)); + wrval = ((temp << 16) | temp); + + ha->isp_ops->wr_reg_indirect(ha, addr2, wrval); + ha->isp_ops->wr_reg_indirect(ha, addr1, value); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", + __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } + + ha->isp_ops->wr_reg_indirect(ha, addr1, + ((0x40000000 | value) + + stride2)); + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); + if ((temp & mask) != 0) + break; + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", + __func__); + rval = QLA_ERROR; + goto exit_process_rddfe; + } + + ha->isp_ops->rd_reg_indirect(ha, addr2, &data); + + *data_ptr++ = cpu_to_le32(wrval); + *data_ptr++ = cpu_to_le32(data); + } + } + + *d_ptr = data_ptr; +exit_process_rddfe: + return rval; +} + +static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + int 
rval = QLA_SUCCESS; + uint32_t addr1, addr2, value1, value2, data, selval; + uint8_t stride1, stride2; + uint32_t addr3, addr4, addr5, addr6, addr7; + uint16_t count, loop_cnt; + uint32_t poll, mask; + uint32_t *data_ptr = *d_ptr; + struct qla8044_minidump_entry_rdmdio *rdmdio; + + rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr; + addr1 = le32_to_cpu(rdmdio->addr_1); + addr2 = le32_to_cpu(rdmdio->addr_2); + value1 = le32_to_cpu(rdmdio->value_1); + stride1 = le32_to_cpu(rdmdio->stride_1); + stride2 = le32_to_cpu(rdmdio->stride_2); + count = le32_to_cpu(rdmdio->count); + + poll = le32_to_cpu(rdmdio->poll); + mask = le32_to_cpu(rdmdio->mask); + value2 = le32_to_cpu(rdmdio->value_2); + + addr3 = addr1 + stride1; + + for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { + rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, + addr3, mask); + if (rval) + goto exit_process_rdmdio; + + addr4 = addr2 - stride1; + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4, + value2); + if (rval) + goto exit_process_rdmdio; + + addr5 = addr2 - (2 * stride1); + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5, + value1); + if (rval) + goto exit_process_rdmdio; + + addr6 = addr2 - (3 * stride1); + rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, + addr6, 0x2); + if (rval) + goto exit_process_rdmdio; + + rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, + addr3, mask); + if (rval) + goto exit_process_rdmdio; + + addr7 = addr2 - (4 * stride1); + rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, + mask, addr7, &data); + if (rval) + goto exit_process_rdmdio; + + selval = (value2 << 18) | (value1 << 2) | 2; + + stride2 = le32_to_cpu(rdmdio->stride_2); + *data_ptr++ = cpu_to_le32(selval); + *data_ptr++ = cpu_to_le32(data); + + value1 = value1 + stride2; + *d_ptr = data_ptr; + } + +exit_process_rdmdio: + return rval; +} + +static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha, + struct qla8xxx_minidump_entry_hdr *entry_hdr, + uint32_t **d_ptr) +{ + uint32_t addr1, addr2, value1, value2, poll, mask, r_value; + struct qla8044_minidump_entry_pollwr *pollwr_hdr; + uint32_t wait_count = 0; + uint32_t rval = QLA_SUCCESS; + + pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; + addr1 = le32_to_cpu(pollwr_hdr->addr_1); + addr2 = le32_to_cpu(pollwr_hdr->addr_2); + value1 = le32_to_cpu(pollwr_hdr->value_1); + value2 = le32_to_cpu(pollwr_hdr->value_2); + + poll = le32_to_cpu(pollwr_hdr->poll); + mask = le32_to_cpu(pollwr_hdr->mask); + + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + + wait_count++; + } + + if (wait_count == poll) { + ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); + rval = QLA_ERROR; + goto exit_process_pollwr; + } + + ha->isp_ops->wr_reg_indirect(ha, addr2, value2); + ha->isp_ops->wr_reg_indirect(ha, addr1, value1); + + wait_count = 0; + while (wait_count < poll) { + ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); + + if ((r_value & poll) != 0) + break; + wait_count++; + } + +exit_process_pollwr: + return rval; +} + static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha, struct qla8xxx_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) @@ -2753,6 +3169,24 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha) if (rval != QLA_SUCCESS) qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); break; + case QLA8044_RDDFE: + rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + 
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8044_RDMDIO: + rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; + case QLA8044_POLLWR: + rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr, + &data_ptr); + if (rval != QLA_SUCCESS) + qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); + break; case QLA8XXX_RDNOP: default: qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index 14500a0f62cc..337d9fcf6417 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h @@ -858,6 +858,9 @@ struct crb_addr_pair { #define QLA83XX_POLLRD 35 #define QLA83XX_RDMUX2 36 #define QLA83XX_POLLRDMWR 37 +#define QLA8044_RDDFE 38 +#define QLA8044_RDMDIO 39 +#define QLA8044_POLLWR 40 #define QLA8XXX_RDROM 71 #define QLA8XXX_RDMEM 72 #define QLA8XXX_CNTRL 98 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 459b9f7186fd..320206376206 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -83,12 +83,12 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo, " Target Session Recovery Timeout.\n" "\t\t Default: 120 sec."); -int ql4xmdcapmask = 0x1F; +int ql4xmdcapmask = 0; module_param(ql4xmdcapmask, int, S_IRUGO); MODULE_PARM_DESC(ql4xmdcapmask, " Set the Minidump driver capture mask level.\n" - "\t\t Default is 0x1F.\n" - "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F"); + "\t\t Default is 0 (firmware default capture mask)\n" + "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF"); int ql4xenablemd = 1; module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); @@ -1742,6 +1742,9 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, struct sockaddr *dst_addr; struct scsi_qla_host *ha; + if (!qla_ep) + return -ENOTCONN; + ha = to_qla_host(qla_ep->host); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, ha->host_no)); @@ -1749,9 +1752,6 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, switch (param) { case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_ADDRESS: - if (!qla_ep) - return -ENOTCONN; - dst_addr = (struct sockaddr *)&qla_ep->dst_addr; if (!dst_addr) return -ENOTCONN; @@ -2879,7 +2879,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, struct iscsi_conn *conn; struct qla_conn *qla_conn; struct sockaddr *dst_addr; - int len = 0; conn = cls_conn->dd_data; qla_conn = conn->dd_data; @@ -2893,9 +2892,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, default: return iscsi_conn_get_param(cls_conn, param, buf); } - - return len; - } int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) @@ -3569,14 +3565,13 @@ static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess, if (test_bit(OPT_IPV6_DEVICE, &options)) { conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; - conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); + conn->link_local_ipv6_addr = kmemdup( + fw_ddb_entry->link_local_ipv6_addr, + IPv6_ADDR_LEN, GFP_KERNEL); if (!conn->link_local_ipv6_addr) { rc = -ENOMEM; goto exit_copy; } - - memcpy(conn->link_local_ipv6_addr, - fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN); } else { conn->ipv4_tos = fw_ddb_entry->ipv4_tos; } @@ -4565,6 +4560,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) || test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || test_bit(DPC_HA_NEED_QUIESCENT, 
&ha->dpc_flags) || + test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) || test_bit(DPC_AEN, &ha->dpc_flags)) { DEBUG2(printk("scsi%ld: %s: scheduling dpc routine" " - dpc flags = 0x%lx\n", @@ -4862,9 +4858,6 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) ha->host_no, __func__)); status = ha->isp_ops->reset_firmware(ha); if (status == QLA_SUCCESS) { - if (!test_bit(AF_FW_RECOVERY, &ha->flags)) - qla4xxx_cmd_wait(ha); - ha->isp_ops->disable_intrs(ha); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); qla4xxx_abort_active_cmds(ha, DID_RESET << 16); @@ -5432,6 +5425,11 @@ dpc_post_reset_ha: qla4xxx_relogin_all_devices(ha); } } + if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { + if (qla4xxx_sysfs_ddb_export(ha)) + ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", + __func__); + } } /** @@ -8409,7 +8407,7 @@ exit_ddb_del: * * Export the firmware DDB for all send targets and normal targets to sysfs. **/ -static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) +int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) { struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; @@ -8847,11 +8845,8 @@ skip_retry_init: ql4_printk(KERN_ERR, ha, "%s: No iSCSI boot target configured\n", __func__); - if (qla4xxx_sysfs_ddb_export(ha)) - ql4_printk(KERN_ERR, ha, - "%s: Error exporting ddb to sysfs\n", __func__); - - /* Perform the build ddb list and login to each */ + set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); + /* Perform the build ddb list and login to each */ qla4xxx_build_ddb_list(ha, INIT_ADAPTER); iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb); qla4xxx_wait_login_resp_boot_tgt(ha); diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index c6ba0a6b8458..f11eaa773339 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. 
*/ -#define QLA4XXX_DRIVER_VERSION "5.04.00-k4" +#define QLA4XXX_DRIVER_VERSION "5.04.00-k6" diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index f3e9cc038d1d..1328a2621070 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -130,6 +130,7 @@ static const char * scsi_debug_version_date = "20100324"; #define SCSI_DEBUG_OPT_DIF_ERR 32 #define SCSI_DEBUG_OPT_DIX_ERR 64 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128 +#define SCSI_DEBUG_OPT_SHORT_TRANSFER 256 /* When "every_nth" > 0 then modulo "every_nth" commands: * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set * - a RECOVERED_ERROR is simulated on successful read and write @@ -3583,6 +3584,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) int inj_transport = 0; int inj_dif = 0; int inj_dix = 0; + int inj_short = 0; int delay_override = 0; int unmap = 0; @@ -3628,6 +3630,8 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) inj_dif = 1; /* to reads and writes below */ else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts) inj_dix = 1; /* to reads and writes below */ + else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts) + inj_short = 1; } if (devip->wlun) { @@ -3744,6 +3748,10 @@ read: if (scsi_debug_fake_rw) break; get_data_transfer_info(cmd, &lba, &num, &ei_lba); + + if (inj_short) + num /= 2; + errsts = resp_read(SCpnt, lba, num, devip, ei_lba); if (inj_recovered && (0 == errsts)) { mk_sense_buffer(devip, RECOVERED_ERROR, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index f17aa7aa7879..7e957918f33f 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work) "aborting command %p\n", scmd)); rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); if (rtn == SUCCESS) { - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); if (scsi_host_eh_past_deadline(sdev->host)) { SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, @@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work) scmd_printk(KERN_WARNING, scmd, "scmd %p terminate " "aborted command\n", scmd)); - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); scsi_finish_command(scmd); } } @@ -287,15 +287,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) else if (host->hostt->eh_timed_out) rtn = host->hostt->eh_timed_out(scmd); - if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort) - if (scsi_abort_command(scmd) == SUCCESS) + if (rtn == BLK_EH_NOT_HANDLED) { + if (!host->hostt->no_async_abort && + scsi_abort_command(scmd) == SUCCESS) return BLK_EH_NOT_HANDLED; - scmd->result |= DID_TIME_OUT << 16; - - if (unlikely(rtn == BLK_EH_NOT_HANDLED && - !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) - rtn = BLK_EH_HANDLED; + set_host_byte(scmd, DID_TIME_OUT); + if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) + rtn = BLK_EH_HANDLED; + } return rtn; } @@ -1029,6 +1029,7 @@ retry: rtn = NEEDS_RETRY; } else { timeleft = wait_for_completion_timeout(&done, timeout); + rtn = SUCCESS; } shost->eh_action = NULL; @@ -1776,7 +1777,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) break; case DID_ABORT: if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { - scmd->result |= DID_TIME_OUT << 16; + set_host_byte(scmd, DID_TIME_OUT); return SUCCESS; } case DID_NO_CONNECT: @@ -1951,6 +1952,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev) */ req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); + 
blk_rq_set_block_pc(req); + req->cmd[0] = ALLOW_MEDIUM_REMOVAL; req->cmd[1] = 0; req->cmd[2] = 0; @@ -1960,7 +1963,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev) req->cmd_len = COMMAND_SIZE(req->cmd[0]); - req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_QUIET; req->timeout = 10 * HZ; req->retries = 5; @@ -2306,6 +2308,12 @@ scsi_reset_provider(struct scsi_device *dev, int flag) } scmd = scsi_get_command(dev, GFP_KERNEL); + if (!scmd) { + rtn = FAILED; + put_device(&dev->sdev_gendev); + goto out_put_autopm_host; + } + blk_rq_init(NULL, &req); scmd->request = &req; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9db097a28a74..3f50dfcb3227 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -140,7 +140,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) cmd->result = 0; spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, cmd->request); - kblockd_schedule_work(q, &device->requeue_work); + kblockd_schedule_work(&device->requeue_work); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -195,6 +195,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); if (!req) return ret; + blk_rq_set_block_pc(req); if (bufflen && blk_rq_map_kern(sdev->request_queue, req, buffer, bufflen, __GFP_WAIT)) @@ -206,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, req->sense_len = 0; req->retries = retries; req->timeout = timeout; - req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; /* @@ -512,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost) scsi_run_queue(sdev->request_queue); } -static void __scsi_release_buffers(struct scsi_cmnd *, int); - -/* - * Function: scsi_end_request() - * - * Purpose: Post-processing of completed commands (usually invoked at end - * of upper level post-processing and scsi_io_completion). - * - * Arguments: cmd - command that is complete. - * error - 0 if I/O indicates success, < 0 for I/O error. - * bytes - number of bytes of completed I/O - * requeue - indicates whether we should requeue leftovers. - * - * Lock status: Assumed that lock is not held upon entry. - * - * Returns: cmd if requeue required, NULL otherwise. - * - * Notes: This is called for block device requests in order to - * mark some number of sectors as complete. - * - * We are guaranteeing that the request queue will be goosed - * at some point during this call. - * Notes: If cmd was requeued, upon return it will be a stale pointer. - */ -static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, - int bytes, int requeue) -{ - struct request_queue *q = cmd->device->request_queue; - struct request *req = cmd->request; - - /* - * If there are blocks left over at the end, set up the command - * to queue the remainder of them. - */ - if (blk_end_request(req, error, bytes)) { - /* kill remainder if no retrys */ - if (error && scsi_noretry_cmd(cmd)) - blk_end_request_all(req, error); - else { - if (requeue) { - /* - * Bleah. Leftovers again. Stick the - * leftovers in the front of the - * queue, and goose the queue again. - */ - scsi_release_buffers(cmd); - scsi_requeue_command(q, cmd); - cmd = NULL; - } - return cmd; - } - } - - /* - * This will goose the queue request function at the end, so we don't - * need to worry about launching another command. 
- */ - __scsi_release_buffers(cmd, 0); - scsi_next_command(cmd); - return NULL; -} - static inline unsigned int scsi_sgtable_index(unsigned short nents) { unsigned int index; @@ -625,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb) __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); } -static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) -{ - - if (cmd->sdb.table.nents) - scsi_free_sgtable(&cmd->sdb); - - memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - - if (do_bidi_check && scsi_bidi_cmnd(cmd)) { - struct scsi_data_buffer *bidi_sdb = - cmd->request->next_rq->special; - scsi_free_sgtable(bidi_sdb); - kmem_cache_free(scsi_sdb_cache, bidi_sdb); - cmd->request->next_rq->special = NULL; - } - - if (scsi_prot_sg_count(cmd)) - scsi_free_sgtable(cmd->prot_sdb); -} - /* * Function: scsi_release_buffers() * - * Purpose: Completion processing for block device I/O requests. + * Purpose: Free resources allocate for a scsi_command. * * Arguments: cmd - command that we are bailing. * @@ -659,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) * Notes: In the event that an upper level driver rejects a * command, we must release resources allocated during * the __init_io() function. Primarily this would involve - * the scatter-gather table, and potentially any bounce - * buffers. + * the scatter-gather table. */ void scsi_release_buffers(struct scsi_cmnd *cmd) { - __scsi_release_buffers(cmd, 1); + if (cmd->sdb.table.nents) + scsi_free_sgtable(&cmd->sdb); + + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + + if (scsi_prot_sg_count(cmd)) + scsi_free_sgtable(cmd->prot_sdb); } EXPORT_SYMBOL(scsi_release_buffers); +static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) +{ + struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; + + scsi_free_sgtable(bidi_sdb); + kmem_cache_free(scsi_sdb_cache, bidi_sdb); + cmd->request->next_rq->special = NULL; +} + /** * __scsi_error_from_host_byte - translate SCSI error code into errno * @cmd: SCSI command (unused) @@ -725,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) * * Returns: Nothing * - * Notes: This function is matched in terms of capabilities to - * the function that created the scatter-gather list. - * In other words, if there are no bounce buffers - * (the normal case for most drivers), we don't need - * the logic to deal with cleaning up afterwards. - * - * We must call scsi_end_request(). This will finish off - * the specified number of sectors. If we are done, the - * command block will be released and the queue function - * will be goosed. If we are not done then we have to + * Notes: We will finish off the specified number of sectors. If we + * are done, the command block will be released and the queue + * function will be goosed. If we are not done then we have to * figure out what to do next: * * a) We can call scsi_requeue_command(). The request @@ -743,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) * be used if we made forward progress, or if we want * to switch from READ(10) to READ(6) for example. * - * b) We can call scsi_queue_insert(). The request will + * b) We can call __scsi_queue_insert(). The request will * be put back on the queue and retried using the same * command as before, possibly after a delay. 
* @@ -801,11 +726,21 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) req->next_rq->resid_len = scsi_in(cmd)->resid; scsi_release_buffers(cmd); + scsi_release_bidi_buffers(cmd); + blk_end_request_all(req, 0); scsi_next_command(cmd); return; } + } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { + /* + * Certain non BLOCK_PC requests are commands that don't + * actually transfer anything (FLUSH), so cannot use + * good_bytes != blk_rq_bytes(req) as the signal for an error. + * This sets the error explicitly for the problem case. + */ + error = __scsi_error_from_host_byte(cmd, result); } /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ @@ -840,12 +775,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } /* - * A number of bytes were successfully read. If there - * are leftovers and there is some kind of error - * (result != 0), retry the rest. + * If we finished all bytes in the request we are done now. */ - if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) - return; + if (!blk_end_request(req, error, good_bytes)) + goto next_command; + + /* + * Kill remainder if no retrys. + */ + if (error && scsi_noretry_cmd(cmd)) { + blk_end_request_all(req, error); + goto next_command; + } + + /* + * If there had been no error, but we have leftover bytes in the + * requeues just queue the command up again. + */ + if (result == 0) + goto requeue; error = __scsi_error_from_host_byte(cmd, result); @@ -973,7 +921,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) switch (action) { case ACTION_FAIL: /* Give up and fail the remainder of the request */ - scsi_release_buffers(cmd); if (!(req->cmd_flags & REQ_QUIET)) { if (description) scmd_printk(KERN_INFO, cmd, "%s\n", @@ -983,12 +930,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) scsi_print_sense("", cmd); scsi_print_command(cmd); } - if (blk_end_request_err(req, error)) - scsi_requeue_command(q, cmd); - else - scsi_next_command(cmd); - break; + if (!blk_end_request_err(req, error)) + goto next_command; + /*FALLTHRU*/ case ACTION_REPREP: + requeue: /* Unprep the request and put it back at the head of the queue. * A new command will be prepared and issued. */ @@ -1004,6 +950,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); break; } + return; + +next_command: + scsi_release_buffers(cmd); + scsi_next_command(cmd); } static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, @@ -1019,8 +970,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, return BLKPREP_DEFER; } - req->buffer = NULL; - /* * Next, walk the list, and fill in the addresses and sizes of * each segment. 
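[Annotation] The scsi_io_completion() rework above replaces the old scsi_end_request() helper with an explicit decision ladder: blk_end_request() consuming all bytes means the command is done (next_command), leftovers combined with a no-retry error kill the remainder, and leftovers with result == 0 re-prep and requeue the command; only then does sense-based error handling run. Here is a toy user-space model of that ladder, assuming invented names (fake_req, end_request, io_completion); only the ordering of the checks mirrors the patch.

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_req {
		unsigned total;		/* bytes in the request */
		unsigned completed;	/* bytes finished so far */
	};

	/* Complete @bytes; return true while the request has leftovers,
	 * mimicking blk_end_request()'s return convention. */
	static bool end_request(struct fake_req *rq, unsigned bytes)
	{
		rq->completed += bytes;
		return rq->completed < rq->total;
	}

	enum action { NEXT_COMMAND, KILL_REMAINDER, REQUEUE, HANDLE_ERROR };

	static enum action io_completion(struct fake_req *rq,
					 unsigned good_bytes, int result,
					 bool no_retry)
	{
		if (!end_request(rq, good_bytes))
			return NEXT_COMMAND;	/* everything finished */
		if (result && no_retry)
			return KILL_REMAINDER;	/* error, retries exhausted */
		if (result == 0)
			return REQUEUE;		/* short but ok: re-prep */
		return HANDLE_ERROR;		/* sense decision follows */
	}

	int main(void)
	{
		struct fake_req rq = { .total = 4096, .completed = 0 };

		/* prints 0 (NEXT_COMMAND) then 2 (REQUEUE) */
		printf("%d\n", io_completion(&rq, 4096, 0, false));
		rq = (struct fake_req){ .total = 4096 };
		printf("%d\n", io_completion(&rq, 2048, 0, false));
		return 0;
	}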
@@ -1130,15 +1079,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd; - int ret = scsi_prep_state_check(sdev, req); - - if (ret != BLKPREP_OK) - return ret; - - cmd = scsi_get_cmd_from_req(sdev, req); - if (unlikely(!cmd)) - return BLKPREP_DEFER; + struct scsi_cmnd *cmd = req->special; /* * BLOCK_PC requests may transfer data, in which case they must @@ -1158,7 +1099,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) BUG_ON(blk_rq_bytes(req)); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - req->buffer = NULL; } cmd->cmd_len = req->cmd_len; @@ -1182,15 +1122,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); */ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd; - int ret = scsi_prep_state_check(sdev, req); - - if (ret != BLKPREP_OK) - return ret; + struct scsi_cmnd *cmd = req->special; if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh && sdev->scsi_dh_data->scsi_dh->prep_fn)) { - ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); + int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); if (ret != BLKPREP_OK) return ret; } @@ -1200,16 +1136,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) */ BUG_ON(!req->nr_phys_segments); - cmd = scsi_get_cmd_from_req(sdev, req); - if (unlikely(!cmd)) - return BLKPREP_DEFER; - memset(cmd->cmnd, 0, BLK_MAX_CDB); return scsi_init_io(cmd, GFP_ATOMIC); } EXPORT_SYMBOL(scsi_setup_fs_cmnd); -int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) +static int +scsi_prep_state_check(struct scsi_device *sdev, struct request *req) { int ret = BLKPREP_OK; @@ -1261,9 +1194,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) } return ret; } -EXPORT_SYMBOL(scsi_prep_state_check); -int scsi_prep_return(struct request_queue *q, struct request *req, int ret) +static int +scsi_prep_return(struct request_queue *q, struct request *req, int ret) { struct scsi_device *sdev = q->queuedata; @@ -1294,18 +1227,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) return ret; } -EXPORT_SYMBOL(scsi_prep_return); -int scsi_prep_fn(struct request_queue *q, struct request *req) +static int scsi_prep_fn(struct request_queue *q, struct request *req) { struct scsi_device *sdev = q->queuedata; - int ret = BLKPREP_KILL; + struct scsi_cmnd *cmd; + int ret; + + ret = scsi_prep_state_check(sdev, req); + if (ret != BLKPREP_OK) + goto out; - if (req->cmd_type == REQ_TYPE_BLOCK_PC) + cmd = scsi_get_cmd_from_req(sdev, req); + if (unlikely(!cmd)) { + ret = BLKPREP_DEFER; + goto out; + } + + if (req->cmd_type == REQ_TYPE_FS) + ret = scsi_cmd_to_driver(cmd)->init_command(cmd); + else if (req->cmd_type == REQ_TYPE_BLOCK_PC) ret = scsi_setup_blk_pc_cmnd(sdev, req); + else + ret = BLKPREP_KILL; + +out: return scsi_prep_return(q, req, ret); } -EXPORT_SYMBOL(scsi_prep_fn); + +static void scsi_unprep_fn(struct request_queue *q, struct request *req) +{ + if (req->cmd_type == REQ_TYPE_FS) { + struct scsi_cmnd *cmd = req->special; + struct scsi_driver *drv = scsi_cmd_to_driver(cmd); + + if (drv->uninit_command) + drv->uninit_command(cmd); + } +} /* * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else @@ -1726,6 +1685,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) return NULL; blk_queue_prep_rq(q, scsi_prep_fn); + blk_queue_unprep_rq(q, scsi_unprep_fn); 
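[Annotation] With prep/unprep now registered once in scsi_alloc_queue(), the per-type routing lives inside scsi_prep_fn(): filesystem requests are dispatched to the owning upper-level driver's new init_command()/uninit_command() hooks (sd and sr below), while BLOCK_PC requests keep the generic pass-through setup. A self-contained sketch of that dispatch follows, using made-up structures and stub drivers; only the routing logic is taken from the patch.

	#include <stdio.h>

	enum cmd_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC, REQ_TYPE_OTHER };
	enum prep_ret { BLKPREP_OK, BLKPREP_KILL, BLKPREP_DEFER };

	struct cmnd;
	struct driver_ops {
		enum prep_ret (*init_command)(struct cmnd *);
		void (*uninit_command)(struct cmnd *);
	};

	struct cmnd {
		enum cmd_type type;
		const struct driver_ops *drv;
	};

	static enum prep_ret setup_blk_pc(struct cmnd *c)
	{
		(void)c;
		printf("generic BLOCK_PC setup\n");
		return BLKPREP_OK;
	}

	static enum prep_ret prep_fn(struct cmnd *c)
	{
		if (c->type == REQ_TYPE_FS)
			return c->drv->init_command(c); /* e.g. sd/sr */
		if (c->type == REQ_TYPE_BLOCK_PC)
			return setup_blk_pc(c);
		return BLKPREP_KILL;
	}

	static void unprep_fn(struct cmnd *c)
	{
		/* Only FS commands own driver-private state to tear down. */
		if (c->type == REQ_TYPE_FS && c->drv->uninit_command)
			c->drv->uninit_command(c);
	}

	static enum prep_ret disk_init(struct cmnd *c)
	{
		(void)c;
		printf("disk init_command\n");
		return BLKPREP_OK;
	}

	static void disk_uninit(struct cmnd *c)
	{
		(void)c;
		printf("disk uninit_command\n");
	}

	int main(void)
	{
		static const struct driver_ops disk = { disk_init,
							disk_uninit };
		struct cmnd c = { REQ_TYPE_FS, &disk };

		if (prep_fn(&c) == BLKPREP_OK)
			unprep_fn(&c);
		return 0;
	}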
blk_queue_softirq_done(q, scsi_softirq_done); blk_queue_rq_timed_out(q, scsi_times_out); blk_queue_lld_busy(q, scsi_lld_busy); diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index fe30ea94ffe6..109802f776ed 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) goto next_msg; } - if (!capable(CAP_SYS_ADMIN)) { + if (!netlink_capable(skb, CAP_SYS_ADMIN)) { err = -EPERM; goto next_msg; } diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c index 2b6b93f7d8ef..546f16299ef9 100644 --- a/drivers/scsi/scsi_sysctl.c +++ b/drivers/scsi/scsi_sysctl.c @@ -12,7 +12,7 @@ #include "scsi_priv.h" -static ctl_table scsi_table[] = { +static struct ctl_table scsi_table[] = { { .procname = "logging_level", .data = &scsi_logging_level, .maxlen = sizeof(scsi_logging_level), @@ -21,14 +21,14 @@ static ctl_table scsi_table[] = { { } }; -static ctl_table scsi_dir_table[] = { +static struct ctl_table scsi_dir_table[] = { { .procname = "scsi", .mode = 0555, .child = scsi_table }, { } }; -static ctl_table scsi_root_table[] = { +static struct ctl_table scsi_root_table[] = { { .procname = "dev", .mode = 0555, .child = scsi_dir_table }, diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 2bea4f0b684a..503594e5f76d 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -28,7 +28,7 @@ scsi_trace_misc(struct trace_seq *, unsigned char *, int); static const char * scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) { - const char *ret = p->buffer + p->len; + const char *ret = trace_seq_buffer_ptr(p); sector_t lba = 0, txlen = 0; lba |= ((cdb[1] & 0x1F) << 16); @@ -46,7 +46,7 @@ scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) static const char * scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) { - const char *ret = p->buffer + p->len; + const char *ret = trace_seq_buffer_ptr(p); sector_t lba = 0, txlen = 0; lba |= (cdb[2] << 24); @@ -71,7 +71,7 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) static const char * scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) { - const char *ret = p->buffer + p->len; + const char *ret = trace_seq_buffer_ptr(p); sector_t lba = 0, txlen = 0; lba |= (cdb[2] << 24); @@ -94,7 +94,7 @@ scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) static const char * scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) { - const char *ret = p->buffer + p->len; + const char *ret = trace_seq_buffer_ptr(p); sector_t lba = 0, txlen = 0; lba |= ((u64)cdb[2] << 56); @@ -125,7 +125,7 @@ scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) static const char * scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) { - const char *ret = p->buffer + p->len, *cmd; + const char *ret = trace_seq_buffer_ptr(p), *cmd; sector_t lba = 0, txlen = 0; u32 ei_lbrt = 0; @@ -180,7 +180,7 @@ out: static const char * scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) { - const char *ret = p->buffer + p->len; + const char *ret = trace_seq_buffer_ptr(p); unsigned int regions = cdb[7] << 8 | cdb[8]; trace_seq_printf(p, "regions=%u", (regions - 8) / 16); @@ -192,7 +192,7 @@ scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) static const char * scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) { - const char *ret = p->buffer + p->len, *cmd; + const char *ret = trace_seq_buffer_ptr(p), *cmd; 
sector_t lba = 0; u32 alloc_len = 0; @@ -247,7 +247,7 @@ scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len) static const char * scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len) { - const char *ret = p->buffer + p->len; + const char *ret = trace_seq_buffer_ptr(p); trace_seq_printf(p, "-"); trace_seq_putc(p, 0); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index f80908f74ca9..521f5838594b 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -2549,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work) fc_flush_devloss(shost); if (!cancel_delayed_work(&rport->dev_loss_work)) fc_flush_devloss(shost); + cancel_work_sync(&rport->scan_work); spin_lock_irqsave(shost->host_lock, flags); rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 1b681427dde0..c341f855fadc 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy) list_del(&rphy->list); mutex_unlock(&sas_host->lock); - sas_bsg_remove(shost, rphy); - transport_destroy_device(dev); put_device(dev); @@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy) } sas_rphy_unlink(rphy); + sas_bsg_remove(NULL, rphy); transport_remove_device(dev); device_del(dev); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index efcbcd182863..6825eda1114a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -109,6 +109,8 @@ static int sd_suspend_system(struct device *); static int sd_suspend_runtime(struct device *); static int sd_resume(struct device *); static void sd_rescan(struct device *); +static int sd_init_command(struct scsi_cmnd *SCpnt); +static void sd_uninit_command(struct scsi_cmnd *SCpnt); static int sd_done(struct scsi_cmnd *); static int sd_eh_action(struct scsi_cmnd *, int); static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); @@ -503,6 +505,8 @@ static struct scsi_driver sd_template = { .pm = &sd_pm_ops, }, .rescan = sd_rescan, + .init_command = sd_init_command, + .uninit_command = sd_uninit_command, .done = sd_done, .eh_action = sd_eh_action, }; @@ -737,16 +741,14 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) goto out; } + rq->completion_data = page; blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); - rq->buffer = page_address(page); rq->__data_len = nr_bytes; out: - if (ret != BLKPREP_OK) { + if (ret != BLKPREP_OK) __free_page(page); - rq->buffer = NULL; - } return ret; } @@ -838,14 +840,13 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) return scsi_setup_blk_pc_cmnd(sdp, rq); } -static void sd_unprep_fn(struct request_queue *q, struct request *rq) +static void sd_uninit_command(struct scsi_cmnd *SCpnt) { - struct scsi_cmnd *SCpnt = rq->special; + struct request *rq = SCpnt->request; + + if (rq->cmd_flags & REQ_DISCARD) + __free_page(rq->completion_data); - if (rq->cmd_flags & REQ_DISCARD) { - free_page((unsigned long)rq->buffer); - rq->buffer = NULL; - } if (SCpnt->cmnd != rq->cmd) { mempool_free(SCpnt->cmnd, sd_cdb_pool); SCpnt->cmnd = NULL; @@ -853,18 +854,10 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) } } -/** - * sd_prep_fn - build a scsi (read or write) command from - * information in the request structure. 
- * @SCpnt: pointer to mid-level's per scsi command structure that - * contains request and into which the scsi command is written - * - * Returns 1 if successful and 0 if error (or cannot be done now). - **/ -static int sd_prep_fn(struct request_queue *q, struct request *rq) +static int sd_init_command(struct scsi_cmnd *SCpnt) { - struct scsi_cmnd *SCpnt; - struct scsi_device *sdp = q->queuedata; + struct request *rq = SCpnt->request; + struct scsi_device *sdp = SCpnt->device; struct gendisk *disk = rq->rq_disk; struct scsi_disk *sdkp; sector_t block = blk_rq_pos(rq); @@ -886,12 +879,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) } else if (rq->cmd_flags & REQ_FLUSH) { ret = scsi_setup_flush_cmnd(sdp, rq); goto out; - } else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - ret = scsi_setup_blk_pc_cmnd(sdp, rq); - goto out; - } else if (rq->cmd_type != REQ_TYPE_FS) { - ret = BLKPREP_KILL; - goto out; } ret = scsi_setup_fs_cmnd(sdp, rq); if (ret != BLKPREP_OK) @@ -903,11 +890,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) * is used for a killable error condition */ ret = BLKPREP_KILL; - SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, - "sd_prep_fn: block=%llu, " - "count=%d\n", - (unsigned long long)block, - this_count)); + SCSI_LOG_HLQUEUE(1, + scmd_printk(KERN_INFO, SCpnt, + "%s: block=%llu, count=%d\n", + __func__, (unsigned long long)block, this_count)); if (!sdp || !scsi_device_online(sdp) || block + blk_rq_sectors(rq) > get_capacity(disk)) { @@ -1127,7 +1113,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq) */ ret = BLKPREP_OK; out: - return scsi_prep_return(q, rq, ret); + return ret; } /** @@ -1689,12 +1675,12 @@ static int sd_done(struct scsi_cmnd *SCpnt) sshdr.ascq)); } #endif + sdkp->medium_access_timed_out = 0; + if (driver_byte(result) != DRIVER_SENSE && (!sense_valid || sense_deferred)) goto out; - sdkp->medium_access_timed_out = 0; - switch (sshdr.sense_key) { case HARDWARE_ERROR: case MEDIUM_ERROR: @@ -2455,7 +2441,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) } sdkp->DPOFUA = (data.device_specific & 0x10) != 0; - if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + if (sdp->broken_fua) { + sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); + sdkp->DPOFUA = 0; + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; @@ -2878,9 +2867,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_revalidate_disk(gd); - blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); - blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn); - gd->driverfs_dev = &sdp->sdev_gendev; gd->flags = GENHD_FL_EXT_DEVT; if (sdp->removable) { @@ -3028,8 +3014,6 @@ static int sd_remove(struct device *dev) async_synchronize_full_domain(&scsi_sd_pm_domain); async_synchronize_full_domain(&scsi_sd_probe_domain); - blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); - blk_queue_unprep_rq(sdkp->device->request_queue, NULL); device_del(&sdkp->dev); del_gendisk(sdkp->disk); sd_shutdown(dev); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index df5e961484e1..53268aaba559 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1653,10 +1653,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd) if (!rq) return -ENOMEM; + blk_rq_set_block_pc(rq); memcpy(rq->cmd, cmd, hp->cmd_len); - rq->cmd_len = hp->cmd_len; - rq->cmd_type = REQ_TYPE_BLOCK_PC; srp->rq = rq; rq->end_io_data = srp; diff 
--git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 40d85929aefe..93cbd36c990b 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -79,6 +79,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM); static DEFINE_MUTEX(sr_mutex); static int sr_probe(struct device *); static int sr_remove(struct device *); +static int sr_init_command(struct scsi_cmnd *SCpnt); static int sr_done(struct scsi_cmnd *); static int sr_runtime_suspend(struct device *dev); @@ -94,6 +95,7 @@ static struct scsi_driver sr_template = { .remove = sr_remove, .pm = &sr_pm_ops, }, + .init_command = sr_init_command, .done = sr_done, }; @@ -378,21 +380,14 @@ static int sr_done(struct scsi_cmnd *SCpnt) return good_bytes; } -static int sr_prep_fn(struct request_queue *q, struct request *rq) +static int sr_init_command(struct scsi_cmnd *SCpnt) { int block = 0, this_count, s_size; struct scsi_cd *cd; - struct scsi_cmnd *SCpnt; - struct scsi_device *sdp = q->queuedata; + struct request *rq = SCpnt->request; + struct scsi_device *sdp = SCpnt->device; int ret; - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - ret = scsi_setup_blk_pc_cmnd(sdp, rq); - goto out; - } else if (rq->cmd_type != REQ_TYPE_FS) { - ret = BLKPREP_KILL; - goto out; - } ret = scsi_setup_fs_cmnd(sdp, rq); if (ret != BLKPREP_OK) goto out; @@ -517,7 +512,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq) */ ret = BLKPREP_OK; out: - return scsi_prep_return(q, rq, ret); + return ret; } static int sr_block_open(struct block_device *bdev, fmode_t mode) @@ -718,7 +713,6 @@ static int sr_probe(struct device *dev) /* FIXME: need to handle a get_capabilities failure properly ?? */ get_capabilities(cd); - blk_queue_prep_rq(sdev->request_queue, sr_prep_fn); sr_vendor_init(cd); disk->driverfs_dev = &sdev->sdev_gendev; @@ -993,7 +987,6 @@ static int sr_remove(struct device *dev) scsi_autopm_get_device(cd->device); - blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn); del_gendisk(cd->disk); mutex_lock(&sr_ref_mutex); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index afc834e172c6..14eb4b256a03 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -484,7 +484,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd, if (!req) return DRIVER_ERROR << 24; - req->cmd_type = REQ_TYPE_BLOCK_PC; + blk_rq_set_block_pc(req); req->cmd_flags |= REQ_QUIET; mdata->null_mapped = 1; diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 636bbe0ea84c..88220794cc98 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c @@ -364,7 +364,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged) return( 0 ); if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >= TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { - TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n", + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); return( 1 ); } @@ -388,7 +388,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) !setup_use_tagged_queuing || !cmd->device->tagged_supported) { cmd->tag = TAG_NONE; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); - TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged " "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); } else { @@ -397,7 +397,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged) cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS 
); set_bit( cmd->tag, &ta->allocated ); ta->nr_allocated++; - TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d " + dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d " "(now %d tags in use)\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun, ta->nr_allocated ); @@ -415,7 +415,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd) if (cmd->tag == TAG_NONE) { hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); - TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n", + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n", H_NO(cmd), cmd->device->id, cmd->device->lun ); } else if (cmd->tag >= MAX_TAGS) { @@ -426,7 +426,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd) TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; clear_bit( cmd->tag, &ta->allocated ); ta->nr_allocated--; - TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n", + dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n", H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun ); } } @@ -484,7 +484,7 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd) #include <linux/delay.h> -#if 1 +#if NDEBUG static struct { unsigned char mask; const char * name;} @@ -572,12 +572,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance) } } -#else /* !NDEBUG */ - -/* dummies... */ -__inline__ void NCR5380_print(struct Scsi_Host *instance) { }; -__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { }; - #endif /* @@ -618,7 +612,7 @@ static inline void NCR5380_all_init (void) { static int done = 0; if (!done) { - INI_PRINTK("scsi : NCR5380_all_init()\n"); + dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n"); done = 1; } } @@ -681,8 +675,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance) Scsi_Cmnd *ptr; unsigned long flags; - NCR_PRINT(NDEBUG_ANY); - NCR_PRINT_PHASE(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); + NCR5380_dprint_phase(NDEBUG_ANY, instance); hostdata = (struct NCR5380_hostdata *)instance->hostdata; @@ -928,7 +922,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, local_irq_restore(flags); - QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), + dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd), (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); /* If queue_command() is called from an interrupt (real one or bottom @@ -998,7 +992,7 @@ static void NCR5380_main (struct work_struct *bl) done = 1; if (!hostdata->connected) { - MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO ); + dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO ); /* * Search through the issue_queue for a command destined * for a target that's not busy. @@ -1012,12 +1006,8 @@ static void NCR5380_main (struct work_struct *bl) for (tmp = (struct scsi_cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { -#if (NDEBUG & NDEBUG_LISTS) if (prev != tmp) - printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", - tmp, tmp->target, hostdata->busy[tmp->target], - tmp->lun); -#endif + dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun); /* When we find one, remove it from the issue queue. */ /* ++guenther: possible race with Falcon locking */ if ( @@ -1047,9 +1037,9 @@ static void NCR5380_main (struct work_struct *bl) * On failure, we must add the command back to the * issue queue so we can keep trying. 
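
The tag bookkeeping visible in cmd_get_tag()/cmd_free_tag() above is a plain per-target/LUN bitmap: allocate with find_first_zero_bit() plus set_bit(), release with clear_bit(). Condensed and renamed for clarity (a sketch; the driver's TAG_ALLOC layout and MAX_TAGS are as defined in sun3_NCR5380.c):

	#include <linux/bitops.h>

	struct tag_alloc {
		DECLARE_BITMAP(allocated, MAX_TAGS);
		int nr_allocated;
		int queue_size;
	};

	/* Returns a free tag, or -1 when the LUN's queue is full. */
	static int tag_get(struct tag_alloc *ta)
	{
		unsigned int tag = find_first_zero_bit(ta->allocated, MAX_TAGS);

		if (tag >= MAX_TAGS)
			return -1;
		set_bit(tag, ta->allocated);
		ta->nr_allocated++;
		return tag;
	}

	static void tag_put(struct tag_alloc *ta, unsigned int tag)
	{
		clear_bit(tag, ta->allocated);
		ta->nr_allocated--;
	}
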
*/ - MAIN_PRINTK("scsi%d: main(): command for target %d " + dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d " "lun %d removed from issue_queue\n", - HOSTNO, tmp->target, tmp->lun); + HOSTNO, tmp->device->id, tmp->device->lun); /* * REQUEST SENSE commands are issued without tagged * queueing, even on SCSI-II devices because the @@ -1076,7 +1066,7 @@ static void NCR5380_main (struct work_struct *bl) cmd_free_tag( tmp ); #endif local_irq_restore(flags); - MAIN_PRINTK("scsi%d: main(): select() failed, " + dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, " "returned to issue_queue\n", HOSTNO); if (hostdata->connected) break; @@ -1090,10 +1080,10 @@ static void NCR5380_main (struct work_struct *bl) #endif ) { local_irq_restore(flags); - MAIN_PRINTK("scsi%d: main: performing information transfer\n", + dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n", HOSTNO); NCR5380_information_transfer(instance); - MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); + dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO); done = 0; } } while (!done); @@ -1130,7 +1120,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance ) return; } - DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", + dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); @@ -1189,27 +1179,27 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) int done = 1, handled = 0; unsigned char basr; - INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO); /* Look for pending interrupts */ basr = NCR5380_read(BUS_AND_STATUS_REG); - INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); + dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr); /* dispatch to appropriate routine if found and done=0 */ if (basr & BASR_IRQ) { - NCR_PRINT(NDEBUG_INTR); + NCR5380_dprint(NDEBUG_INTR, instance); if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { done = 0; // ENABLE_IRQ(); - INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO); NCR5380_reselect(instance); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if (basr & BASR_PARITY_ERROR) { - INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO); (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { - INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO); (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); } else { @@ -1229,7 +1219,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) ((basr & BASR_END_DMA_TRANSFER) || !(basr & BASR_PHASE_MATCH))) { - INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); NCR5380_dma_complete( instance ); done = 0; // ENABLE_IRQ(); @@ -1238,7 +1228,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) { /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ if (basr & BASR_PHASE_MATCH) - INT_PRINTK("scsi%d: unknown interrupt, " + dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, " "BASR 0x%x, MR 0x%x, SR 0x%x\n", HOSTNO, basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); @@ -1262,7 +1252,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id) } if (!done) { - INT_PRINTK("scsi%d: in 
int routine, calling main\n", HOSTNO); + dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO); /* Put a call to NCR5380_main() on the queue... */ queue_main(); } @@ -1338,8 +1328,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, unsigned long flags; hostdata->restart_select = 0; - NCR_PRINT(NDEBUG_ARBITRATION); - ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, + NCR5380_dprint(NDEBUG_ARBITRATION, instance); + dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO, instance->this_id); /* @@ -1385,7 +1375,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, && !hostdata->connected); #endif - ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); + dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO); if (hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); @@ -1406,7 +1396,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); - ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", + dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", HOSTNO); return -1; } @@ -1421,7 +1411,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, hostdata->connected) { NCR5380_write(MODE_REG, MR_BASE); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", + dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", HOSTNO); return -1; } @@ -1444,7 +1434,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, return -1; } - ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); + dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO); /* * Now that we have won arbitration, start Selection process, asserting @@ -1504,7 +1494,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, udelay(1); - SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); + dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); /* * The SCSI specification calls for a 250 ms timeout for the actual @@ -1559,7 +1549,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); if (hostdata->restart_select) printk(KERN_NOTICE "\trestart select\n"); - NCR_PRINT(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return -1; } @@ -1572,7 +1562,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, #endif cmd->scsi_done(cmd); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); - SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); + dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); return 0; } @@ -1597,7 +1587,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd, /* Wait for start of REQ/ACK handshake */ while (!(NCR5380_read(STATUS_REG) & SR_REQ)); - SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", + dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n", HOSTNO, cmd->device->id); tmp[0] = IDENTIFY(1, cmd->device->lun); @@ -1617,7 +1607,7 @@ static int NCR5380_select(struct 
Scsi_Host *instance, struct scsi_cmnd *cmd, data = tmp; phase = PHASE_MSGOUT; NCR5380_transfer_pio(instance, &phase, &len, &data); - SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); + dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO); /* XXX need to handle errors here */ hostdata->connected = cmd; #ifndef SUPPORT_TAGS @@ -1680,12 +1670,12 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, */ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); - HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); + dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO); /* Check for phase mismatch */ if ((tmp & PHASE_MASK) != p) { - PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); - NCR_PRINT_PHASE(NDEBUG_PIO); + dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); + NCR5380_dprint_phase(NDEBUG_PIO, instance); break; } @@ -1708,24 +1698,24 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, if (!((p & SR_MSG) && c > 1)) { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK); } else { NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN); - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); } } else { - NCR_PRINT(NDEBUG_PIO); + NCR5380_dprint(NDEBUG_PIO, instance); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); } while (NCR5380_read(STATUS_REG) & SR_REQ); - HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); + dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO); /* * We have several special cases to consider during REQ/ACK handshaking : @@ -1746,7 +1736,7 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance, } } while (--c); - PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); + dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c); *count = c; *data = d; @@ -1854,7 +1844,7 @@ static int NCR5380_transfer_dma( struct Scsi_Host *instance, } hostdata->dma_len = c; - DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", + dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n", HOSTNO, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? 
"to" : "from", *data); @@ -1931,7 +1921,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) phase = (tmp & PHASE_MASK); if (phase != old_phase) { old_phase = phase; - NCR_PRINT_PHASE(NDEBUG_INFORMATION); + NCR5380_dprint_phase(NDEBUG_INFORMATION, instance); } if(phase == PHASE_CMDOUT) { @@ -1996,7 +1986,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) --cmd->SCp.buffers_residual; cmd->SCp.this_residual = cmd->SCp.buffer->length; cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); - INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", + dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n", HOSTNO, cmd->SCp.this_residual, cmd->SCp.buffers_residual); } @@ -2088,7 +2078,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - LNK_PRINTK("scsi%d: target %d lun %d linked command " + dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command " "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); /* Enable reselect interrupts */ @@ -2113,7 +2103,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) * and don't free it! */ cmd->next_link->tag = cmd->tag; cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); - LNK_PRINTK("scsi%d: target %d lun %d linked request " + dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request " "done, calling scsi_done().\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef NCR5380_STATS @@ -2128,7 +2118,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) /* Accept message by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); hostdata->connected = NULL; - QU_PRINTK("scsi%d: command for target %d, lun %d " + dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d " "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); #ifdef SUPPORT_TAGS cmd_free_tag( cmd ); @@ -2142,7 +2132,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) /* ++Andreas: the mid level code knows about QUEUE_FULL now. 
*/ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; - TAG_PRINTK("scsi%d: target %d lun %d returned " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned " "QUEUE_FULL after %d commands\n", HOSTNO, cmd->device->id, cmd->device->lun, ta->nr_allocated); @@ -2186,7 +2176,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); - ASEN_PRINTK("scsi%d: performing request sense\n", + dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO); /* this is initialized from initialize_SCp cmd->SCp.buffer = NULL; @@ -2198,7 +2188,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) SET_NEXT(cmd, hostdata->issue_queue); hostdata->issue_queue = (struct scsi_cmnd *) cmd; local_irq_restore(flags); - QU_PRINTK("scsi%d: REQUEST SENSE added to head of " + dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of " "issue queue\n", H_NO(cmd)); } else #endif /* def AUTOSENSE */ @@ -2238,7 +2228,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) cmd->device->tagged_supported = 0; hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); cmd->tag = TAG_NONE; - TAG_PRINTK("scsi%d: target %d lun %d rejected " + dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected " "QUEUE_TAG message; tagged queuing " "disabled\n", HOSTNO, cmd->device->id, cmd->device->lun); @@ -2255,7 +2245,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) hostdata->connected = NULL; hostdata->disconnected_queue = cmd; local_irq_restore(flags); - QU_PRINTK("scsi%d: command for target %d lun %d was " + dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was " "moved from connected to the " "disconnected_queue\n", HOSTNO, cmd->device->id, cmd->device->lun); @@ -2308,13 +2298,13 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) /* Accept first byte by clearing ACK */ NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); - EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); + dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO); len = 2; data = extended_msg + 1; phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, + dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO, (int)extended_msg[1], (int)extended_msg[2]); if (!len && extended_msg[1] <= @@ -2326,7 +2316,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) phase = PHASE_MSGIN; NCR5380_transfer_pio(instance, &phase, &len, &data); - EXT_PRINTK("scsi%d: message received, residual %d\n", + dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n", HOSTNO, len); switch (extended_msg[2]) { @@ -2416,7 +2406,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance) break; default: printk("scsi%d: unknown phase\n", HOSTNO); - NCR_PRINT(NDEBUG_ANY); + NCR5380_dprint(NDEBUG_ANY, instance); } /* switch(phase) */ } /* if (tmp * SR_REQ) */ } /* while (1) */ @@ -2458,7 +2448,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance) target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); - RSL_PRINTK("scsi%d: reselect\n", HOSTNO); + dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO); /* * At this point, we have detected that our SCSI ID is on the bus, @@ -2580,14 +2570,14 @@ static void NCR5380_reselect (struct 
Scsi_Host *instance) if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && msg[1] == SIMPLE_QUEUE_TAG) tag = msg[2]; - TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " + dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at " "reselection\n", HOSTNO, target_mask, lun, tag); } #endif hostdata->connected = tmp; - RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", - HOSTNO, tmp->target, tmp->lun, tmp->tag); + dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", + HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); } @@ -2622,7 +2612,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) local_irq_save(flags); - ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, + dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)); @@ -2635,7 +2625,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (hostdata->connected == cmd) { - ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO); /* * We should perform BSY checking, and make sure we haven't slipped * into BUS FREE. @@ -2664,11 +2654,11 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) #endif local_irq_restore(flags); cmd->scsi_done(cmd); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } else { /* local_irq_restore(flags); */ printk("scsi%d: abort of connected command failed!\n", HOSTNO); - return SCSI_ABORT_ERROR; + return FAILED; } } #endif @@ -2686,12 +2676,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) SET_NEXT(tmp, NULL); tmp->result = DID_ABORT << 16; local_irq_restore(flags); - ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", + dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n", HOSTNO); /* Tagged queuing note: no tag to free here, hasn't been assigned * yet... */ tmp->scsi_done(tmp); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } /* @@ -2707,8 +2697,8 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) if (hostdata->connected) { local_irq_restore(flags); - ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); - return SCSI_ABORT_SNOOZE; + dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); + return FAILED; } /* @@ -2740,12 +2730,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) tmp = NEXT(tmp)) if (cmd == tmp) { local_irq_restore(flags); - ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO); if (NCR5380_select (instance, cmd, (int) cmd->tag)) - return SCSI_ABORT_BUSY; + return FAILED; - ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); + dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO); do_abort (instance); @@ -2769,7 +2759,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) #endif local_irq_restore(flags); tmp->scsi_done(tmp); - return SCSI_ABORT_SUCCESS; + return SUCCESS; } } @@ -2786,7 +2776,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) local_irq_restore(flags); printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); - return SCSI_ABORT_NOT_RUNNING; + return FAILED; } @@ -2795,7 +2785,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd) * * Purpose : reset the SCSI bus. 
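
The abort-path hunks here are the mechanical half of dropping the long-obsolete SCSI_ABORT_*/SCSI_RESET_* codes: a modern eh_abort_handler reports only SUCCESS or FAILED and leaves retry and escalation policy to the error-handling midlayer. Skeleton for reference (a sketch, not this driver's code; `removed` is a placeholder condition):

	static int my_eh_abort_handler(struct scsi_cmnd *cmd)
	{
		/* ...try to pull cmd off the issue, connected or
		 * disconnected queues, as NCR5380_abort() does above... */
		if (removed)		/* placeholder for the actual test */
			return SUCCESS;	/* midlayer completes the command */
		return FAILED;		/* midlayer escalates to a reset */
	}

This is why SCSI_ABORT_SNOOZE, SCSI_ABORT_BUSY and SCSI_ABORT_NOT_RUNNING all collapse to FAILED above: the midlayer no longer wants the driver's opinion on *how* to retry, only whether the abort worked.
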
* - * Returns : SCSI_RESET_WAKEUP + * Returns : SUCCESS or FAILURE * */ @@ -2804,7 +2794,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) SETUP_HOSTDATA(cmd->device->host); int i; unsigned long flags; -#if 1 +#if defined(RESET_RUN_DONE) struct scsi_cmnd *connected, *disconnected_queue; #endif @@ -2826,8 +2816,15 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) * through anymore ... */ (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); -#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ - /* XXX see below XXX */ + /* MSch 20140115 - looking at the generic NCR5380 driver, all of this + * should go. + * Catch-22: if we don't clear all queues, the SCSI driver lock will + * not be released by atari_scsi_reset()! + */ + +#if defined(RESET_RUN_DONE) + /* XXX Should now be done by midlevel code, but it's broken XXX */ + /* XXX see below XXX */ /* MSch: old-style reset: actually abort all command processing here */ @@ -2857,7 +2854,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) */ if ((cmd = connected)) { - ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); cmd->scsi_done( cmd ); } @@ -2869,14 +2866,14 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) cmd->scsi_done( cmd ); } if (i > 0) - ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); + dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i); /* since all commands have been explicitly terminated, we need to tell * the midlevel code that the reset was SUCCESSFUL, and there is no * need to 'wake up' the commands by a request_sense */ - return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; + return SUCCESS; #else /* 1 */ /* MSch: new-style reset handling: let the mid-level do what it can */ @@ -2903,11 +2900,11 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) */ if (hostdata->issue_queue) - ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); if (hostdata->connected) - ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd)); if (hostdata->disconnected_queue) - ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); + dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); local_irq_save(flags); hostdata->issue_queue = NULL; @@ -2924,7 +2921,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd) local_irq_restore(flags); /* we did no complete reset of all commands, so a wakeup is required */ - return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; + return SUCCESS; #endif /* 1 */ } diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index e2c009b033ce..9707b7494a89 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -3,6 +3,10 @@ * * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) * + * VME support added by Sam Creasey + * + * TODO: modify this driver to support multiple Sun3 SCSI VME boards + * * Adapted from mac_scsinew.c: */ /* @@ -45,10 +49,6 @@ * USLEEP - enable support for devices that don't disconnect. Untested. 
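
Just above, the bare `#if 1` guarding the old-style "complete everything from the reset handler" code becomes `#if defined(RESET_RUN_DONE)`, so each board file opts in by name instead of by editing shared code — sun3_scsi.c defines it (see the `#define RESET_RUN_DONE` hunk below). The pattern in miniature, with illustrative comments:

	/* Board file (sun3_scsi.c) that still wants legacy semantics: */
	#define RESET_RUN_DONE

	/* Shared core (sun3_NCR5380.c): */
	#if defined(RESET_RUN_DONE)
		/* old style: terminate every queued command from the reset
		 * handler itself, then return SUCCESS */
	#else
		/* new style: just reset the bus and let the EH midlayer
		 * re-drive the queues */
	#endif
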
*/ -/* - * $Log: sun3_NCR5380.c,v $ - */ - #define AUTOSENSE #include <linux/types.h> @@ -69,23 +69,15 @@ #include <asm/idprom.h> #include <asm/machines.h> -#define NDEBUG 0 - -#define NDEBUG_ABORT 0x00100000 -#define NDEBUG_TAGS 0x00200000 -#define NDEBUG_MERGING 0x00400000 - /* dma on! */ #define REAL_DMA #include "scsi.h" -#include "initio.h" #include <scsi/scsi_host.h> #include "sun3_scsi.h" +#include "NCR5380.h" -static void NCR5380_print(struct Scsi_Host *instance); - -/* #define OLDDMA */ +extern int sun3_map_test(unsigned long, char *); #define USE_WRAPPER /*#define RESET_BOOT */ @@ -101,7 +93,11 @@ static void NCR5380_print(struct Scsi_Host *instance); /* #define SUPPORT_TAGS */ +#ifdef SUN3_SCSI_VME +#define ENABLE_IRQ() +#else #define ENABLE_IRQ() enable_irq( IRQ_SUN3_SCSI ); +#endif static irqreturn_t scsi_sun3_intr(int irq, void *dummy); @@ -123,6 +119,8 @@ module_param(setup_hostid, int, 0); static struct scsi_cmnd *sun3_dma_setup_done = NULL; +#define RESET_RUN_DONE + #define AFTER_RESET_DELAY (HZ/2) /* ms to wait after hitting dma regs */ @@ -136,10 +134,9 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL; static volatile unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; -#ifdef OLDDMA -static unsigned char *dmabuf = NULL; /* dma memory buffer */ -#endif +#ifndef SUN3_SCSI_VME static struct sun3_udc_regs *udc_regs = NULL; +#endif static unsigned char *sun3_dma_orig_addr = NULL; static unsigned long sun3_dma_orig_count = 0; static int sun3_dma_active = 0; @@ -159,6 +156,7 @@ static inline void sun3scsi_write(int reg, int value) sun3_scsi_regp[reg] = value; } +#ifndef SUN3_SCSI_VME /* dma controller register access functions */ static inline unsigned short sun3_udc_read(unsigned char reg) @@ -180,6 +178,7 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg) dregs->udc_data = val; udelay(SUN3_DMA_DELAY); } +#endif /* * XXX: status debug @@ -198,17 +197,32 @@ static struct Scsi_Host *default_instance; * */ -int __init sun3scsi_detect(struct scsi_host_template * tpnt) +static int __init sun3scsi_detect(struct scsi_host_template *tpnt) { - unsigned long ioaddr; + unsigned long ioaddr, irq; static int called = 0; struct Scsi_Host *instance; +#ifdef SUN3_SCSI_VME + int i; + unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, + IOBASE_SUN3_VMESCSI + 0x4000, + 0 }; + unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, + SUN3_VEC_VMESCSI1, + 0 }; +#endif /* check that this machine has an onboard 5380 */ switch(idprom->id_machtype) { +#ifdef SUN3_SCSI_VME + case SM_SUN3|SM_3_160: + case SM_SUN3|SM_3_260: + break; +#else case SM_SUN3|SM_3_50: case SM_SUN3|SM_3_60: break; +#endif default: return 0; @@ -217,7 +231,11 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) if(called) return 0; +#ifdef SUN3_SCSI_VME + tpnt->proc_name = "Sun3 5380 VME SCSI"; +#else tpnt->proc_name = "Sun3 5380 SCSI"; +#endif /* setup variables */ tpnt->can_queue = @@ -234,6 +252,38 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) tpnt->this_id = 7; } +#ifdef SUN3_SCSI_VME + ioaddr = 0; + for (i = 0; addrs[i] != 0; i++) { + unsigned char x; + + ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, + SUN3_PAGE_TYPE_VME16); + irq = vecs[i]; + sun3_scsi_regp = (unsigned char *)ioaddr; + + dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); + + if (sun3_map_test((unsigned long)dregs, &x)) { + unsigned short oldcsr; + + oldcsr = dregs->csr; + dregs->csr = 0; + udelay(SUN3_DMA_DELAY); + if (dregs->csr == 0x1400) + break; + + 
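
With the private NDEBUG/NDEBUG_* definitions above deleted, the board file now picks up its debug machinery from the shared NCR5380.h it includes. The dprintk() used by all of the converted call sites in this patch boils down to a single printk wrapper gated by a compile-time category bitmask — roughly (a sketch of the header's macro):

	#if NDEBUG
	#define dprintk(flg, fmt, ...)					\
		do {							\
			if ((NDEBUG) & (flg))				\
				printk(KERN_DEBUG fmt, ## __VA_ARGS__);	\
		} while (0)
	#else
	#define dprintk(flg, fmt, ...)	do {} while (0)
	#endif

Because the category test happens inside the macro, the per-category TAG_PRINTK/MAIN_PRINTK/INT_PRINTK wrappers (and their #if/#else boilerplate) become redundant, which is most of what this patch deletes.
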
dregs->csr = oldcsr; + } + + iounmap((void *)ioaddr); + ioaddr = 0; + } + + if (!ioaddr) + return 0; +#else + irq = IRQ_SUN3_SCSI; ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE); sun3_scsi_regp = (unsigned char *)ioaddr; @@ -244,11 +294,6 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); return 0; } -#ifdef OLDDMA - if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) { - printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); - return 0; - } #endif #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) @@ -262,7 +307,7 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) default_instance = instance; instance->io_port = (unsigned long) ioaddr; - instance->irq = IRQ_SUN3_SCSI; + instance->irq = irq; NCR5380_init(instance, 0); @@ -283,7 +328,8 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) #endif } - printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port); + pr_info("scsi%d: %s at port %lX irq", instance->host_no, + tpnt->proc_name, instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk ("s disabled"); else @@ -300,6 +346,15 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt) dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; udelay(SUN3_DMA_DELAY); dregs->fifo_count = 0; +#ifdef SUN3_SCSI_VME + dregs->fifo_count_hi = 0; + dregs->dma_addr_hi = 0; + dregs->dma_addr_lo = 0; + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + + dregs->ivect = VME_DATA24 | (instance->irq & 0xff); +#endif called = 1; @@ -367,7 +422,8 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance) } #endif -const char * sun3scsi_info (struct Scsi_Host *spnt) { +static const char *sun3scsi_info(struct Scsi_Host *spnt) +{ return ""; } @@ -379,6 +435,10 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy) unsigned short csr = dregs->csr; int handled = 0; +#ifdef SUN3_SCSI_VME + dregs->csr &= ~CSR_DMA_ENABLE; +#endif + if(csr & ~CSR_GOOD) { if(csr & CSR_DMA_BUSERR) { printk("scsi%d: bus error in dma\n", default_instance->host_no); @@ -422,31 +482,28 @@ void sun3_sun3_debug (void) /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) { -#ifdef OLDDMA - if(write_flag) - memcpy(dmabuf, data, count); - else { - sun3_dma_orig_addr = data; - sun3_dma_orig_count = count; - } -#else void *addr; if(sun3_dma_orig_addr != NULL) dvma_unmap(sun3_dma_orig_addr); -// addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); +#ifdef SUN3_SCSI_VME + addr = (void *)dvma_map_vme((unsigned long) data, count); +#else addr = (void *)dvma_map((unsigned long) data, count); +#endif sun3_dma_orig_addr = addr; sun3_dma_orig_count = count; -#endif + +#ifndef SUN3_SCSI_VME dregs->fifo_count = 0; sun3_udc_write(UDC_RESET, UDC_CSR); /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; +#endif /* set direction */ if(write_flag) @@ -454,6 +511,17 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri else dregs->csr &= ~CSR_SEND; +#ifdef SUN3_SCSI_VME + dregs->csr |= CSR_PACK_ENABLE; + + dregs->dma_addr_hi = ((unsigned long)addr >> 16); + dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); + + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + dregs->fifo_count_hi = 0; + dregs->fifo_count = 0; +#else /* byte count for fifo */ dregs->fifo_count = count; @@ -467,17 +535,12 @@ static unsigned long 
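
The loop just above is the entire VME discovery strategy: map each candidate address, use sun3_map_test() to survive probing an empty slot without a bus error, and accept the board only if clearing the DMA CSR reads back the idle signature 0x1400. Factored out for clarity (illustrative function name; constants as in the hunk):

	/* Sketch: probe candidate VME 5380 boards by CSR signature. */
	static unsigned long probe_vme_5380(const unsigned long *addrs,
					    const unsigned long *vecs,
					    unsigned long *irq_out)
	{
		int i;

		for (i = 0; addrs[i]; i++) {
			unsigned long io = (unsigned long)sun3_ioremap(addrs[i],
						PAGE_SIZE, SUN3_PAGE_TYPE_VME16);
			struct sun3_dma_regs *r = (void *)(io + 8);
			unsigned char x;

			if (sun3_map_test((unsigned long)r, &x)) {
				unsigned short old = r->csr;

				r->csr = 0;
				udelay(SUN3_DMA_DELAY);
				if (r->csr == 0x1400) {	/* idle signature */
					*irq_out = vecs[i];
					return io;	/* board found */
				}
				r->csr = old;	/* not ours; restore */
			}
			iounmap((void *)io);
		}
		return 0;
	}
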
sun3scsi_dma_setup(void *data, unsigned long count, int wri printk("scsi%d: fifo_mismatch %04x not %04x\n", default_instance->host_no, dregs->fifo_count, (unsigned int) count); - NCR5380_print(default_instance); + NCR5380_dprint(NDEBUG_DMA, default_instance); } /* setup udc */ -#ifdef OLDDMA - udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8); - udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff); -#else udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8); udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); -#endif udc_regs->count = count/2; /* count in words */ udc_regs->mode_hi = UDC_MODE_HIWORD; if(write_flag) { @@ -501,11 +564,13 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri /* interrupt enable */ sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); +#endif return count; } +#ifndef SUN3_SCSI_VME static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) { unsigned short resid; @@ -518,6 +583,7 @@ static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance) return (unsigned long) resid; } +#endif static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) { @@ -536,8 +602,23 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) { +#ifdef SUN3_SCSI_VME + unsigned short csr; + + csr = dregs->csr; + dregs->dma_count_hi = (sun3_dma_orig_count >> 16); + dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); + + dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); + dregs->fifo_count = (sun3_dma_orig_count & 0xffff); + +/* if(!(csr & CSR_DMA_ENABLE)) + * dregs->csr |= CSR_DMA_ENABLE; + */ +#else sun3_udc_write(UDC_CHN_START, UDC_CSR); +#endif return 0; } @@ -545,12 +626,46 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data) /* clean up after our dma is done */ static int sun3scsi_dma_finish(int write_flag) { - unsigned short count; + unsigned short __maybe_unused count; unsigned short fifo; int ret = 0; sun3_dma_active = 0; -#if 1 + +#ifdef SUN3_SCSI_VME + dregs->csr &= ~CSR_DMA_ENABLE; + + fifo = dregs->fifo_count; + if (write_flag) { + if ((fifo > 0) && (fifo < sun3_dma_orig_count)) + fifo++; + } + + last_residual = fifo; + /* empty bytes from the fifo which didn't make it */ + if ((!write_flag) && (dregs->csr & CSR_LEFT)) { + unsigned char *vaddr; + + vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); + + vaddr += (sun3_dma_orig_count - fifo); + vaddr--; + + switch (dregs->csr & CSR_LEFT) { + case CSR_LEFT_3: + *vaddr = (dregs->bpack_lo & 0xff00) >> 8; + vaddr--; + + case CSR_LEFT_2: + *vaddr = (dregs->bpack_hi & 0x00ff); + vaddr--; + + case CSR_LEFT_1: + *vaddr = (dregs->bpack_hi & 0xff00) >> 8; + break; + } + } +#else // check to empty the fifo on a read if(!write_flag) { int tmo = 20000; /* .2 sec */ @@ -566,28 +681,8 @@ static int sun3scsi_dma_finish(int write_flag) udelay(10); } } - -#endif count = sun3scsi_dma_count(default_instance); -#ifdef OLDDMA - - /* if we've finished a read, copy out the data we read */ - if(sun3_dma_orig_addr) { - /* check for residual bytes after dma end */ - if(count && (NCR5380_read(BUS_AND_STATUS_REG) & - (BASR_PHASE_MATCH | BASR_ACK))) { - printk("scsi%d: sun3_scsi_finish: read overrun baby... 
", default_instance->host_no); - printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG)); - ret = count; - } - - /* copy in what we dma'd no matter what */ - memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count); - sun3_dma_orig_addr = NULL; - - } -#else fifo = dregs->fifo_count; last_residual = fifo; @@ -605,10 +700,23 @@ static int sun3scsi_dma_finish(int write_flag) vaddr[-2] = (data & 0xff00) >> 8; vaddr[-1] = (data & 0xff); } +#endif dvma_unmap(sun3_dma_orig_addr); sun3_dma_orig_addr = NULL; -#endif + +#ifdef SUN3_SCSI_VME + dregs->dma_addr_hi = 0; + dregs->dma_addr_lo = 0; + dregs->dma_count_hi = 0; + dregs->dma_count_lo = 0; + + dregs->fifo_count = 0; + dregs->fifo_count_hi = 0; + + dregs->csr &= ~CSR_SEND; +/* dregs->csr |= CSR_DMA_ENABLE; */ +#else sun3_udc_write(UDC_RESET, UDC_CSR); dregs->fifo_count = 0; dregs->csr &= ~CSR_SEND; @@ -616,6 +724,7 @@ static int sun3scsi_dma_finish(int write_flag) /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; +#endif sun3_dma_setup_done = NULL; diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h index a8da9c710fea..e96a37cf06ac 100644 --- a/drivers/scsi/sun3_scsi.h +++ b/drivers/scsi/sun3_scsi.h @@ -29,12 +29,8 @@ * 1+ (800) 334-5454 */ -/* - * $Log: cumana_NCR5380.h,v $ - */ - -#ifndef SUN3_NCR5380_H -#define SUN3_NCR5380_H +#ifndef SUN3_SCSI_H +#define SUN3_SCSI_H #define SUN3SCSI_PUBLIC_RELEASE 1 @@ -82,8 +78,6 @@ static int sun3scsi_release (struct Scsi_Host *); #define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI" #endif -#ifndef HOSTS_C - #define NCR5380_implementation_fields \ int port, ctrl @@ -108,9 +102,6 @@ static int sun3scsi_release (struct Scsi_Host *); #define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0) #define NCR5380_dma_residual sun3scsi_dma_residual -#define BOARD_NORMAL 0 -#define BOARD_NCR53C400 1 - /* additional registers - mainly DMA control regs */ /* these start at regbase + 8 -- directly after the NCR regs */ struct sun3_dma_regs { @@ -191,189 +182,5 @@ struct sun3_udc_regs { #define VME_DATA24 0x3d00 -// debugging printk's, taken from atari_scsi.h -/* Debugging printk definitions: - * - * ARB -> arbitration - * ASEN -> auto-sense - * DMA -> DMA - * HSH -> PIO handshake - * INF -> information transfer - * INI -> initialization - * INT -> interrupt - * LNK -> linked commands - * MAIN -> NCR5380_main() control flow - * NDAT -> no data-out phase - * NWR -> no write commands - * PIO -> PIO transfers - * PDMA -> pseudo DMA (unused on Atari) - * QU -> queues - * RSL -> reselections - * SEL -> selections - * USL -> usleep cpde (unused on Atari) - * LBS -> last byte sent (unused on Atari) - * RSS -> restarting of selections - * EXT -> extended messages - * ABRT -> aborting and resetting - * TAG -> queue tag handling - * MER -> merging of consec. buffers - * - */ - -#include "NCR5380.h" - -#if NDEBUG & NDEBUG_ARBITRATION -#define ARB_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define ARB_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_AUTOSENSE -#define ASEN_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define ASEN_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_DMA -#define DMA_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define DMA_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_HANDSHAKE -#define HSH_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define HSH_PRINTK(format, args...) 
-#endif -#if NDEBUG & NDEBUG_INFORMATION -#define INF_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define INF_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_INIT -#define INI_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define INI_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_INTR -#define INT_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define INT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_LINKED -#define LNK_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define LNK_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_MAIN -#define MAIN_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define MAIN_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_NO_DATAOUT -#define NDAT_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define NDAT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_NO_WRITE -#define NWR_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define NWR_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_PIO -#define PIO_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define PIO_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_PSEUDO_DMA -#define PDMA_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define PDMA_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_QUEUES -#define QU_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define QU_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_RESELECTION -#define RSL_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define RSL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_SELECTION -#define SEL_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define SEL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_USLEEP -#define USL_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define USL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_LAST_BYTE_SENT -#define LBS_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define LBS_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_RESTART_SELECT -#define RSS_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define RSS_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_EXTENDED -#define EXT_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define EXT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_ABORT -#define ABRT_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define ABRT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_TAGS -#define TAG_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define TAG_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_MERGING -#define MER_PRINTK(format, args...) \ - printk(KERN_DEBUG format , ## args) -#else -#define MER_PRINTK(format, args...) -#endif - -/* conditional macros for NCR5380_print_{,phase,status} */ - -#define NCR_PRINT(mask) \ - ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0) - -#define NCR_PRINT_PHASE(mask) \ - ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0) - -#define NCR_PRINT_STATUS(mask) \ - ((NDEBUG & (mask)) ? 
NCR5380_print_status(instance) : (void)0) - - - -#endif /* ndef HOSTS_C */ -#endif /* SUN3_NCR5380_H */ +#endif /* SUN3_SCSI_H */ diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index a3dd55d1d2fd..1eeece6e2040 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c @@ -1,589 +1,3 @@ - /* - * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) - * - * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) - * - * VME support added by Sam Creasey - * - * Adapted from sun3_scsi.c -- see there for other headers - * - * TODO: modify this driver to support multiple Sun3 SCSI VME boards - * - */ - -#define AUTOSENSE - -#include <linux/types.h> -#include <linux/stddef.h> -#include <linux/ctype.h> -#include <linux/delay.h> - -#include <linux/module.h> -#include <linux/signal.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/blkdev.h> - -#include <asm/io.h> - -#include <asm/sun3ints.h> -#include <asm/dvma.h> -#include <asm/idprom.h> -#include <asm/machines.h> - #define SUN3_SCSI_VME -#undef SUN3_SCSI_DEBUG - -/* dma on! */ -#define REAL_DMA - -#define NDEBUG 0 - -#define NDEBUG_ABORT 0x00100000 -#define NDEBUG_TAGS 0x00200000 -#define NDEBUG_MERGING 0x00400000 - -#include "scsi.h" -#include "initio.h" -#include <scsi/scsi_host.h> -#include "sun3_scsi.h" - -extern int sun3_map_test(unsigned long, char *); - -#define USE_WRAPPER -/*#define RESET_BOOT */ -#define DRIVER_SETUP - -/* - * BUG can be used to trigger a strange code-size related hang on 2.1 kernels - */ -#ifdef BUG -#undef RESET_BOOT -#undef DRIVER_SETUP -#endif - -/* #define SUPPORT_TAGS */ - -//#define ENABLE_IRQ() enable_irq( SUN3_VEC_VMESCSI0 ); -#define ENABLE_IRQ() - - -static irqreturn_t scsi_sun3_intr(int irq, void *dummy); -static inline unsigned char sun3scsi_read(int reg); -static inline void sun3scsi_write(int reg, int value); - -static int setup_can_queue = -1; -module_param(setup_can_queue, int, 0); -static int setup_cmd_per_lun = -1; -module_param(setup_cmd_per_lun, int, 0); -static int setup_sg_tablesize = -1; -module_param(setup_sg_tablesize, int, 0); -#ifdef SUPPORT_TAGS -static int setup_use_tagged_queuing = -1; -module_param(setup_use_tagged_queuing, int, 0); -#endif -static int setup_hostid = -1; -module_param(setup_hostid, int, 0); - -static struct scsi_cmnd *sun3_dma_setup_done = NULL; - -#define AFTER_RESET_DELAY (HZ/2) - -/* ms to wait after hitting dma regs */ -#define SUN3_DMA_DELAY 10 - -/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ -#define SUN3_DVMA_BUFSIZE 0xe000 - -/* minimum number of bytes to do dma on */ -#define SUN3_DMA_MINSIZE 128 - -static volatile unsigned char *sun3_scsi_regp; -static volatile struct sun3_dma_regs *dregs; -#ifdef OLDDMA -static unsigned char *dmabuf = NULL; /* dma memory buffer */ -#endif -static unsigned char *sun3_dma_orig_addr = NULL; -static unsigned long sun3_dma_orig_count = 0; -static int sun3_dma_active = 0; -static unsigned long last_residual = 0; - -/* - * NCR 5380 register access functions - */ - -static inline unsigned char sun3scsi_read(int reg) -{ - return( sun3_scsi_regp[reg] ); -} - -static inline void sun3scsi_write(int reg, int value) -{ - sun3_scsi_regp[reg] = value; -} - -/* - * XXX: status debug - */ -static struct Scsi_Host *default_instance; - -/* - * Function : int sun3scsi_detect(struct scsi_host_template * tpnt) - * - * Purpose : initializes mac NCR5380 driver based on the - * command line / compile time port and irq definitions. 
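
The NCR_PRINT/NCR_PRINT_PHASE wrappers deleted above have shared equivalents in NCR5380.h, spelled NCR5380_dprint() and NCR5380_dprint_phase(); those are what the converted call sites throughout this patch use. Their shape is roughly (a sketch of the header's macros, not part of this diff):

	#if NDEBUG
	#define NCR5380_dprint(flg, arg)				\
		do {							\
			if ((NDEBUG) & (flg))				\
				NCR5380_print(arg);			\
		} while (0)
	#define NCR5380_dprint_phase(flg, arg)				\
		do {							\
			if ((NDEBUG) & (flg))				\
				NCR5380_print_phase(arg);		\
		} while (0)
	#else
	#define NCR5380_dprint(flg, arg)       do {} while (0)
	#define NCR5380_dprint_phase(flg, arg) do {} while (0)
	#endif

Taking the instance explicitly as an argument, rather than relying on a local named `instance` being in scope, is what lets the core code call them from any function.
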
- * - * Inputs : tpnt - template for this SCSI adapter. - * - * Returns : 1 if a host adapter was found, 0 if not. - * - */ - -static int __init sun3scsi_detect(struct scsi_host_template * tpnt) -{ - unsigned long ioaddr, irq = 0; - static int called = 0; - struct Scsi_Host *instance; - int i; - unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, - IOBASE_SUN3_VMESCSI + 0x4000, - 0 }; - unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, - SUN3_VEC_VMESCSI1, - 0 }; - /* check that this machine has an onboard 5380 */ - switch(idprom->id_machtype) { - case SM_SUN3|SM_3_160: - case SM_SUN3|SM_3_260: - break; - - default: - return 0; - } - - if(called) - return 0; - - tpnt->proc_name = "Sun3 5380 VME SCSI"; - - /* setup variables */ - tpnt->can_queue = - (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; - tpnt->cmd_per_lun = - (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; - tpnt->sg_tablesize = - (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; - - if (setup_hostid >= 0) - tpnt->this_id = setup_hostid; - else { - /* use 7 as default */ - tpnt->this_id = 7; - } - - ioaddr = 0; - for(i = 0; addrs[i] != 0; i++) { - unsigned char x; - - ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, - SUN3_PAGE_TYPE_VME16); - irq = vecs[i]; - sun3_scsi_regp = (unsigned char *)ioaddr; - - dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); - - if(sun3_map_test((unsigned long)dregs, &x)) { - unsigned short oldcsr; - - oldcsr = dregs->csr; - dregs->csr = 0; - udelay(SUN3_DMA_DELAY); - if(dregs->csr == 0x1400) - break; - - dregs->csr = oldcsr; - } - - iounmap((void *)ioaddr); - ioaddr = 0; - } - - if(!ioaddr) - return 0; - -#ifdef SUPPORT_TAGS - if (setup_use_tagged_queuing < 0) - setup_use_tagged_queuing = USE_TAGGED_QUEUING; -#endif - - instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); - if(instance == NULL) - return 0; - - default_instance = instance; - - instance->io_port = (unsigned long) ioaddr; - instance->irq = irq; - - NCR5380_init(instance, 0); - - instance->n_io_port = 32; - - ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; - - if (request_irq(instance->irq, scsi_sun3_intr, - 0, "Sun3SCSI-5380VME", instance)) { -#ifndef REAL_DMA - printk("scsi%d: IRQ%d not free, interrupts disabled\n", - instance->host_no, instance->irq); - instance->irq = SCSI_IRQ_NONE; -#else - printk("scsi%d: IRQ%d not free, bailing out\n", - instance->host_no, instance->irq); - return 0; -#endif - } - - printk("scsi%d: Sun3 5380 VME at port %lX irq", instance->host_no, instance->io_port); - if (instance->irq == SCSI_IRQ_NONE) - printk ("s disabled"); - else - printk (" %d", instance->irq); - printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", - instance->can_queue, instance->cmd_per_lun, - SUN3SCSI_PUBLIC_RELEASE); - printk("\nscsi%d:", instance->host_no); - NCR5380_print_options(instance); - printk("\n"); - - dregs->csr = 0; - udelay(SUN3_DMA_DELAY); - dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; - udelay(SUN3_DMA_DELAY); - dregs->fifo_count = 0; - dregs->fifo_count_hi = 0; - dregs->dma_addr_hi = 0; - dregs->dma_addr_lo = 0; - dregs->dma_count_hi = 0; - dregs->dma_count_lo = 0; - - dregs->ivect = VME_DATA24 | (instance->irq & 0xff); - - called = 1; - -#ifdef RESET_BOOT - sun3_scsi_reset_boot(instance); -#endif - - return 1; -} - -int sun3scsi_release (struct Scsi_Host *shpnt) -{ - if (shpnt->irq != SCSI_IRQ_NONE) - free_irq(shpnt->irq, shpnt); - - iounmap((void *)sun3_scsi_regp); - - NCR5380_exit(shpnt); - return 0; -} - -#ifdef RESET_BOOT -/* - * Our 'bus 
reset on boot' function - */ - -static void sun3_scsi_reset_boot(struct Scsi_Host *instance) -{ - unsigned long end; - - NCR5380_local_declare(); - NCR5380_setup(instance); - - /* - * Do a SCSI reset to clean up the bus during initialization. No - * messing with the queues, interrupts, or locks necessary here. - */ - - printk( "Sun3 SCSI: resetting the SCSI bus..." ); - - /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ -// sun3_disable_irq( IRQ_SUN3_SCSI ); - - /* get in phase */ - NCR5380_write( TARGET_COMMAND_REG, - PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); - - /* assert RST */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); - - /* The min. reset hold time is 25us, so 40us should be enough */ - udelay( 50 ); - - /* reset RST and interrupt */ - NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); - NCR5380_read( RESET_PARITY_INTERRUPT_REG ); - - for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) - barrier(); - - /* switch on SCSI IRQ again */ -// sun3_enable_irq( IRQ_SUN3_SCSI ); - - printk( " done\n" ); -} -#endif - -static const char * sun3scsi_info (struct Scsi_Host *spnt) { - return ""; -} - -// safe bits for the CSR -#define CSR_GOOD 0x060f - -static irqreturn_t scsi_sun3_intr(int irq, void *dummy) -{ - unsigned short csr = dregs->csr; - int handled = 0; - - dregs->csr &= ~CSR_DMA_ENABLE; - - -#ifdef SUN3_SCSI_DEBUG - printk("scsi_intr csr %x\n", csr); -#endif - - if(csr & ~CSR_GOOD) { - if(csr & CSR_DMA_BUSERR) { - printk("scsi%d: bus error in dma\n", default_instance->host_no); -#ifdef SUN3_SCSI_DEBUG - printk("scsi: residual %x count %x addr %p dmaaddr %x\n", - dregs->fifo_count, - dregs->dma_count_lo | (dregs->dma_count_hi << 16), - sun3_dma_orig_addr, - dregs->dma_addr_lo | (dregs->dma_addr_hi << 16)); -#endif - } - - if(csr & CSR_DMA_CONFLICT) { - printk("scsi%d: dma conflict\n", default_instance->host_no); - } - handled = 1; - } - - if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { - NCR5380_intr(irq, dummy); - handled = 1; - } - - return IRQ_RETVAL(handled); -} - -/* - * Debug stuff - to be called on NMI, or sysrq key. 
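
The VME interrupt handler being deleted below (its twin now lives in sun3_scsi.c, see the merged version earlier in this patch) follows the usual discipline for a handler that may not own the interrupt: read and classify the status, service or log each cause, and report via IRQ_RETVAL() whether anything was actually handled. Condensed sketch (names as in the driver; error reporting trimmed):

	static irqreturn_t my_5380_intr(int irq, void *dev_id)
	{
		unsigned short csr = dregs->csr;
		int handled = 0;

		if (csr & ~CSR_GOOD) {		/* any error bits set? */
			if (csr & CSR_DMA_BUSERR)
				pr_err("scsi: bus error in dma\n");
			handled = 1;
		}
		if (csr & (CSR_SDB_INT | CSR_DMA_INT)) {
			NCR5380_intr(irq, dev_id);	/* core 5380 service */
			handled = 1;
		}
		return IRQ_RETVAL(handled);	/* IRQ_HANDLED or IRQ_NONE */
	}
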
Use at your own risk; - * reentering NCR5380_print_status seems to have ugly side effects - */ - -/* this doesn't seem to get used at all -- sam */ -#if 0 -void sun3_sun3_debug (void) -{ - unsigned long flags; - NCR5380_local_declare(); - - if (default_instance) { - local_irq_save(flags); - NCR5380_print_status(default_instance); - local_irq_restore(flags); - } -} -#endif - - -/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ -static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) -{ - void *addr; - - if(sun3_dma_orig_addr != NULL) - dvma_unmap(sun3_dma_orig_addr); - -// addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); - addr = (void *)dvma_map_vme((unsigned long) data, count); - - sun3_dma_orig_addr = addr; - sun3_dma_orig_count = count; - -#ifdef SUN3_SCSI_DEBUG - printk("scsi: dma_setup addr %p count %x\n", addr, count); -#endif - -// dregs->fifo_count = 0; -#if 0 - /* reset fifo */ - dregs->csr &= ~CSR_FIFO; - dregs->csr |= CSR_FIFO; -#endif - /* set direction */ - if(write_flag) - dregs->csr |= CSR_SEND; - else - dregs->csr &= ~CSR_SEND; - - /* reset fifo */ -// dregs->csr &= ~CSR_FIFO; -// dregs->csr |= CSR_FIFO; - - dregs->csr |= CSR_PACK_ENABLE; - - dregs->dma_addr_hi = ((unsigned long)addr >> 16); - dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); - - dregs->dma_count_hi = 0; - dregs->dma_count_lo = 0; - dregs->fifo_count_hi = 0; - dregs->fifo_count = 0; - -#ifdef SUN3_SCSI_DEBUG - printk("scsi: dma_setup done csr %x\n", dregs->csr); -#endif - return count; - -} - -static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) -{ - return last_residual; -} - -static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, - struct scsi_cmnd *cmd, - int write_flag) -{ - if (cmd->request->cmd_type == REQ_TYPE_FS) - return wanted; - else - return 0; -} - -static int sun3scsi_dma_start(unsigned long count, char *data) -{ - - unsigned short csr; - - csr = dregs->csr; -#ifdef SUN3_SCSI_DEBUG - printk("scsi: dma_start data %p count %x csr %x fifo %x\n", data, count, csr, dregs->fifo_count); -#endif - - dregs->dma_count_hi = (sun3_dma_orig_count >> 16); - dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); - - dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); - dregs->fifo_count = (sun3_dma_orig_count & 0xffff); - -// if(!(csr & CSR_DMA_ENABLE)) -// dregs->csr |= CSR_DMA_ENABLE; - - return 0; -} - -/* clean up after our dma is done */ -static int sun3scsi_dma_finish(int write_flag) -{ - unsigned short fifo; - int ret = 0; - - sun3_dma_active = 0; - - dregs->csr &= ~CSR_DMA_ENABLE; - - fifo = dregs->fifo_count; - if(write_flag) { - if((fifo > 0) && (fifo < sun3_dma_orig_count)) - fifo++; - } - - last_residual = fifo; -#ifdef SUN3_SCSI_DEBUG - printk("scsi: residual %x total %x\n", fifo, sun3_dma_orig_count); -#endif - /* empty bytes from the fifo which didn't make it */ - if((!write_flag) && (dregs->csr & CSR_LEFT)) { - unsigned char *vaddr; - -#ifdef SUN3_SCSI_DEBUG - printk("scsi: got left over bytes\n"); -#endif - - vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); - - vaddr += (sun3_dma_orig_count - fifo); - vaddr--; - - switch(dregs->csr & CSR_LEFT) { - case CSR_LEFT_3: - *vaddr = (dregs->bpack_lo & 0xff00) >> 8; - vaddr--; - - case CSR_LEFT_2: - *vaddr = (dregs->bpack_hi & 0x00ff); - vaddr--; - - case CSR_LEFT_1: - *vaddr = (dregs->bpack_hi & 0xff00) >> 8; - break; - } - - - } - - dvma_unmap(sun3_dma_orig_addr); - sun3_dma_orig_addr = NULL; - - dregs->dma_addr_hi = 0; 
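
The CSR_LEFT switch in both copies of sun3scsi_dma_finish() deliberately omits breaks: with one to three bytes stranded in the DMA byte-packing registers at the end of a read, each case stores one byte and cascades into the next while walking vaddr backwards. Annotated sketch of the idiom:

	/* Sketch: recover 1-3 leftover bytes from the VME byte-pack
	 * registers.  The missing breaks are intentional. */
	switch (dregs->csr & CSR_LEFT) {
	case CSR_LEFT_3:
		*vaddr-- = (dregs->bpack_lo & 0xff00) >> 8;
		/* fall through */
	case CSR_LEFT_2:
		*vaddr-- = dregs->bpack_hi & 0x00ff;
		/* fall through */
	case CSR_LEFT_1:
		*vaddr = (dregs->bpack_hi & 0xff00) >> 8;
		break;
	}
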
- dregs->dma_addr_lo = 0; - dregs->dma_count_hi = 0; - dregs->dma_count_lo = 0; - - dregs->fifo_count = 0; - dregs->fifo_count_hi = 0; - - dregs->csr &= ~CSR_SEND; - -// dregs->csr |= CSR_DMA_ENABLE; - -#if 0 - /* reset fifo */ - dregs->csr &= ~CSR_FIFO; - dregs->csr |= CSR_FIFO; -#endif - sun3_dma_setup_done = NULL; - - return ret; - -} - -#include "sun3_NCR5380.c" - -static struct scsi_host_template driver_template = { - .name = SUN3_SCSI_NAME, - .detect = sun3scsi_detect, - .release = sun3scsi_release, - .info = sun3scsi_info, - .queuecommand = sun3scsi_queue_command, - .eh_abort_handler = sun3scsi_abort, - .eh_bus_reset_handler = sun3scsi_bus_reset, - .can_queue = CAN_QUEUE, - .this_id = 7, - .sg_tablesize = SG_TABLESIZE, - .cmd_per_lun = CMD_PER_LUN, - .use_clustering = DISABLE_CLUSTERING -}; - - -#include "scsi_module.c" - -MODULE_LICENSE("GPL"); +#include "sun3_scsi.c" diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index a4abce9d526e..8cc80931df14 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -102,10 +102,6 @@ * 15 9-11 */ -/* - * $Log: t128.c,v $ - */ - #include <linux/signal.h> #include <linux/io.h> #include <linux/blkdev.h> diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index 1df82c28e56d..fd68cecc62af 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h @@ -34,10 +34,6 @@ * 1+ (800) 334-5454 */ -/* - * $Log: t128.h,v $ - */ - #ifndef T128_H #define T128_H @@ -107,8 +103,6 @@ static int t128_bus_reset(struct scsi_cmnd *); #define CAN_QUEUE 32 #endif -#ifndef HOSTS_C - #define NCR5380_implementation_fields \ void __iomem *base @@ -148,6 +142,5 @@ static int t128_bus_reset(struct scsi_cmnd *); #define T128_IRQS 0xc4a8 -#endif /* else def HOSTS_C */ #endif /* ndef ASM */ #endif /* T128_H */ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 721050090520..f42d1cee652a 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -196,9 +196,9 @@ enum { * @dword_2: UPIU header DW-2 */ struct utp_upiu_header { - u32 dword_0; - u32 dword_1; - u32 dword_2; + __be32 dword_0; + __be32 dword_1; + __be32 dword_2; }; /** @@ -207,7 +207,7 @@ struct utp_upiu_header { * @cdb: Command Descriptor Block CDB DW-4 to DW-7 */ struct utp_upiu_cmd { - u32 exp_data_transfer_len; + __be32 exp_data_transfer_len; u8 cdb[MAX_CDB_SIZE]; }; @@ -228,10 +228,10 @@ struct utp_upiu_query { u8 idn; u8 index; u8 selector; - u16 reserved_osf; - u16 length; - u32 value; - u32 reserved[2]; + __be16 reserved_osf; + __be16 length; + __be32 value; + __be32 reserved[2]; }; /** @@ -256,9 +256,9 @@ struct utp_upiu_req { * @sense_data: Sense data field DW-8 to DW-12 */ struct utp_cmd_rsp { - u32 residual_transfer_count; - u32 reserved[4]; - u16 sense_data_len; + __be32 residual_transfer_count; + __be32 reserved[4]; + __be16 sense_data_len; u8 sense_data[18]; }; @@ -286,10 +286,10 @@ struct utp_upiu_rsp { */ struct utp_upiu_task_req { struct utp_upiu_header header; - u32 input_param1; - u32 input_param2; - u32 input_param3; - u32 reserved[2]; + __be32 input_param1; + __be32 input_param2; + __be32 input_param3; + __be32 reserved[2]; }; /** @@ -301,9 +301,9 @@ struct utp_upiu_task_req { */ struct utp_upiu_task_rsp { struct utp_upiu_header header; - u32 output_param1; - u32 output_param2; - u32 reserved[3]; + __be32 output_param1; + __be32 output_param2; + __be32 reserved[3]; }; /** diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 04884d663e4e..0c2877251251 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -55,6 +55,9 @@ /* 
Query request timeout */ #define QUERY_REQ_TIMEOUT 30 /* msec */ +/* Task management command timeout */ +#define TM_CMD_TIMEOUT 100 /* msecs */ + /* Expose the flag value from utp_upiu_query.value */ #define MASK_QUERY_UPIU_FLAG_LOC 0xFF @@ -71,9 +74,22 @@ enum { /* UFSHCD states */ enum { - UFSHCD_STATE_OPERATIONAL, UFSHCD_STATE_RESET, UFSHCD_STATE_ERROR, + UFSHCD_STATE_OPERATIONAL, +}; + +/* UFSHCD error handling flags */ +enum { + UFSHCD_EH_IN_PROGRESS = (1 << 0), +}; + +/* UFSHCD UIC layer error flags */ +enum { + UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ + UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ + UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ + UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */ }; /* Interrupt configuration options */ @@ -83,6 +99,18 @@ enum { UFSHCD_INT_CLEAR, }; +#define ufshcd_set_eh_in_progress(h) \ + (h->eh_flags |= UFSHCD_EH_IN_PROGRESS) +#define ufshcd_eh_in_progress(h) \ + (h->eh_flags & UFSHCD_EH_IN_PROGRESS) +#define ufshcd_clear_eh_in_progress(h) \ + (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) + +static void ufshcd_tmc_handler(struct ufs_hba *hba); +static void ufshcd_async_scan(void *data, async_cookie_t cookie); +static int ufshcd_reset_and_restore(struct ufs_hba *hba); +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); + /* * ufshcd_wait_for_register - wait for register value to change * @hba - per-adapter interface */ @@ -163,7 +191,7 @@ static inline int ufshcd_is_device_present(u32 reg_hcs) */ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) { - return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS; + return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS; } /** @@ -176,19 +204,41 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) static inline int ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp) { - return task_req_descp->header.dword_2 & MASK_OCS; + return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS; } /** * ufshcd_get_tm_free_slot - get a free slot for task management request * @hba: per adapter instance + * @free_slot: pointer to variable with available slot value + * - * Returns maximum number of task management request slots in case of - * task management queue full or returns the free slot number + * Get a free tag and lock it until ufshcd_put_tm_slot() is called. + * Returns false if a free slot is not available, else returns true with + * the tag value in @free_slot.
*/ -static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) +static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) { - return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs); + int tag; + bool ret = false; + + if (!free_slot) + goto out; + + do { + tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); + if (tag >= hba->nutmrs) + goto out; + } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); + + *free_slot = tag; + ret = true; +out: + return ret; +} + +static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) +{ + clear_bit_unlock(slot, &hba->tm_slots_in_use); } /** @@ -390,26 +440,6 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) } /** - * ufshcd_query_to_cpu() - formats the buffer to native cpu endian - * @response: upiu query response to convert - */ -static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response) -{ - response->length = be16_to_cpu(response->length); - response->value = be32_to_cpu(response->value); -} - -/** - * ufshcd_query_to_be() - formats the buffer to big endian - * @request: upiu query request to convert - */ -static inline void ufshcd_query_to_be(struct utp_upiu_query *request) -{ - request->length = cpu_to_be16(request->length); - request->value = cpu_to_be32(request->value); -} - -/** * ufshcd_copy_query_response() - Copy the Query Response and the data * descriptor * @hba: per adapter instance @@ -425,7 +455,6 @@ void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) UPIU_RSP_CODE_OFFSET; memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); - ufshcd_query_to_cpu(&query_res->upiu_res); /* Get the descriptor */ @@ -749,7 +778,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, { struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; struct ufs_query *query = &hba->dev_cmd.query; - u16 len = query->request.upiu_req.length; + u16 len = be16_to_cpu(query->request.upiu_req.length); u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE; /* Query request header */ @@ -766,7 +795,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba, /* Copy the Query Request buffer as is */ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE); - ufshcd_query_to_be(&ucd_req_ptr->qr); /* Copy the Descriptor */ if ((len > 0) && (query->request.upiu_req.opcode == @@ -853,10 +881,25 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) tag = cmd->request->tag; - if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { + spin_lock_irqsave(hba->host->host_lock, flags); + switch (hba->ufshcd_state) { + case UFSHCD_STATE_OPERATIONAL: + break; + case UFSHCD_STATE_RESET: err = SCSI_MLQUEUE_HOST_BUSY; - goto out; + goto out_unlock; + case UFSHCD_STATE_ERROR: + set_host_byte(cmd, DID_ERROR); + cmd->scsi_done(cmd); + goto out_unlock; + default: + dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", + __func__, hba->ufshcd_state); + set_host_byte(cmd, DID_BAD_TARGET); + cmd->scsi_done(cmd); + goto out_unlock; } + spin_unlock_irqrestore(hba->host->host_lock, flags); /* acquire the tag to make sure device cmds don't use it */ if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { @@ -893,6 +936,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) /* issue command to the controller */ spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_send_command(hba, tag); +out_unlock: spin_unlock_irqrestore(hba->host->host_lock, flags); out: return err; @@ -1151,7 
+1195,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, } if (flag_res) - *flag_res = (response->upiu_res.value & + *flag_res = (be32_to_cpu(response->upiu_res.value) & MASK_QUERY_UPIU_FLAG_LOC) & 0x1; out_unlock: @@ -1170,7 +1214,7 @@ out_unlock: * * Returns 0 for success, non-zero in case of failure */ -int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, +static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val) { struct ufs_query_req *request; @@ -1195,7 +1239,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, switch (opcode) { case UPIU_QUERY_OPCODE_WRITE_ATTR: request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; - request->upiu_req.value = *attr_val; + request->upiu_req.value = cpu_to_be32(*attr_val); break; case UPIU_QUERY_OPCODE_READ_ATTR: request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; @@ -1222,7 +1266,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, goto out_unlock; } - *attr_val = response->upiu_res.value; + *attr_val = be32_to_cpu(response->upiu_res.value); out_unlock: mutex_unlock(&hba->dev_cmd.lock); @@ -1481,7 +1525,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); * * Returns 0 on success, non-zero value on failure */ -int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) +static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) { struct uic_command uic_cmd = {0}; struct completion pwr_done; @@ -1701,11 +1745,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) goto out; } - if (hba->ufshcd_state == UFSHCD_STATE_RESET) - scsi_unblock_requests(hba->host); - - hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; - out: return err; } @@ -1831,66 +1870,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) } /** - * ufshcd_do_reset - reset the host controller - * @hba: per adapter instance - * - * Returns SUCCESS/FAILED - */ -static int ufshcd_do_reset(struct ufs_hba *hba) -{ - struct ufshcd_lrb *lrbp; - unsigned long flags; - int tag; - - /* block commands from midlayer */ - scsi_block_requests(hba->host); - - spin_lock_irqsave(hba->host->host_lock, flags); - hba->ufshcd_state = UFSHCD_STATE_RESET; - - /* send controller to reset state */ - ufshcd_hba_stop(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); - - /* abort outstanding commands */ - for (tag = 0; tag < hba->nutrs; tag++) { - if (test_bit(tag, &hba->outstanding_reqs)) { - lrbp = &hba->lrb[tag]; - if (lrbp->cmd) { - scsi_dma_unmap(lrbp->cmd); - lrbp->cmd->result = DID_RESET << 16; - lrbp->cmd->scsi_done(lrbp->cmd); - lrbp->cmd = NULL; - clear_bit_unlock(tag, &hba->lrb_in_use); - } - } - } - - /* complete device management command */ - if (hba->dev_cmd.complete) - complete(hba->dev_cmd.complete); - - /* clear outstanding request/task bit maps */ - hba->outstanding_reqs = 0; - hba->outstanding_tasks = 0; - - /* Host controller enable */ - if (ufshcd_hba_enable(hba)) { - dev_err(hba->dev, - "Reset: Controller initialization failed\n"); - return FAILED; - } - - if (ufshcd_link_startup(hba)) { - dev_err(hba->dev, - "Reset: Link start-up failed\n"); - return FAILED; - } - - return SUCCESS; -} - -/** * ufshcd_slave_alloc - handle initial SCSI device configurations * @sdev: pointer to SCSI device * @@ -1907,6 +1886,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) sdev->use_10_for_ms = 1; scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); + /* allow SCSI layer to restart the device in case of errors */ 
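The slot allocator introduced in the hunk above (find_first_zero_bit() plus test_and_set_bit_lock(), released again with clear_bit_unlock()) is lock-free: a loser of the set-bit race simply rescans the bitmap. A rough userspace analogue using C11 atomics; the names and the eight-slot size are made up, and the real driver layers a wait queue on top instead of returning failure to the caller:

#include <stdatomic.h>
#include <stdbool.h>

#define NR_SLOTS 8                      /* analogue of hba->nutmrs */

static atomic_uint slots_in_use;        /* analogue of hba->tm_slots_in_use */

static bool get_free_slot(int *free_slot)
{
        for (;;) {
                unsigned int map = atomic_load(&slots_in_use);
                int tag;

                /* like find_first_zero_bit(): lowest clear bit */
                for (tag = 0; tag < NR_SLOTS; tag++)
                        if (!(map & (1U << tag)))
                                break;
                if (tag >= NR_SLOTS)
                        return false;   /* every slot is busy */

                /* like test_and_set_bit_lock(): claim with acquire ordering */
                if (!(atomic_fetch_or_explicit(&slots_in_use, 1U << tag,
                                memory_order_acquire) & (1U << tag))) {
                        *free_slot = tag;
                        return true;
                }
                /* somebody else took that bit first; rescan */
        }
}

static void put_slot(int tag)
{
        /* like clear_bit_unlock(): release ordering publishes our writes */
        atomic_fetch_and_explicit(&slots_in_use, ~(1U << tag),
                                  memory_order_release);
}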
+ sdev->allow_restart = 1; + /* * Inform SCSI Midlayer that the LUN queue depth is same as the * controller queue depth. If a LUN queue depth is less than the @@ -1934,10 +1916,11 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev) * ufshcd_task_req_compl - handle task management request completion * @hba: per adapter instance * @index: index of the completed request + * @resp: task management service response * - * Returns SUCCESS/FAILED + * Returns non-zero value on error, zero on success */ -static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) +static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp) { struct utp_task_req_desc *task_req_descp; struct utp_upiu_task_rsp *task_rsp_upiup; @@ -1958,19 +1941,15 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) task_req_descp[index].task_rsp_upiu; task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); - - if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL && - task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) - task_result = FAILED; - else - task_result = SUCCESS; + if (resp) + *resp = (u8)task_result; } else { - task_result = FAILED; - dev_err(hba->dev, - "trc: Invalid ocs = %x\n", ocs_value); + dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", + __func__, ocs_value); } spin_unlock_irqrestore(hba->host->host_lock, flags); - return task_result; + + return ocs_value; } /** @@ -2105,6 +2084,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) case OCS_ABORTED: result |= DID_ABORT << 16; break; + case OCS_INVALID_COMMAND_STATUS: + result |= DID_REQUEUE << 16; + break; case OCS_INVALID_CMD_TABLE_ATTR: case OCS_INVALID_PRDT_ATTR: case OCS_MISMATCH_DATA_BUF_SIZE: @@ -2422,41 +2404,145 @@ out: } /** - * ufshcd_fatal_err_handler - handle fatal errors - * @hba: per adapter instance + * ufshcd_err_handler - handle UFS errors that require s/w attention + * @work: pointer to work structure */ -static void ufshcd_fatal_err_handler(struct work_struct *work) +static void ufshcd_err_handler(struct work_struct *work) { struct ufs_hba *hba; - hba = container_of(work, struct ufs_hba, feh_workq); + unsigned long flags; + u32 err_xfer = 0; + u32 err_tm = 0; + int err = 0; + int tag; + + hba = container_of(work, struct ufs_hba, eh_work); pm_runtime_get_sync(hba->dev); - /* check if reset is already in progress */ - if (hba->ufshcd_state != UFSHCD_STATE_RESET) - ufshcd_do_reset(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ufshcd_state == UFSHCD_STATE_RESET) { + spin_unlock_irqrestore(hba->host->host_lock, flags); + goto out; + } + + hba->ufshcd_state = UFSHCD_STATE_RESET; + ufshcd_set_eh_in_progress(hba); + + /* Complete requests that have door-bell cleared by h/w */ + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* Clear pending transfer requests */ + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) + if (ufshcd_clear_cmd(hba, tag)) + err_xfer |= 1 << tag; + + /* Clear pending task management requests */ + for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) + if (ufshcd_clear_tm_cmd(hba, tag)) + err_tm |= 1 << tag; + + /* Complete the requests that are cleared by s/w */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* Fatal errors need reset */ + if (err_xfer || err_tm || (hba->saved_err & 
INT_FATAL_ERRORS) || + ((hba->saved_err & UIC_ERROR) && + (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { + err = ufshcd_reset_and_restore(hba); + if (err) { + dev_err(hba->dev, "%s: reset and restore failed\n", + __func__); + hba->ufshcd_state = UFSHCD_STATE_ERROR; + } + /* + * Inform scsi mid-layer that we did reset and allow it to handle + * Unit Attention properly. + */ + scsi_report_bus_reset(hba->host, 0); + hba->saved_err = 0; + hba->saved_uic_err = 0; + } + ufshcd_clear_eh_in_progress(hba); + +out: + scsi_unblock_requests(hba->host); pm_runtime_put_sync(hba->dev); } /** - * ufshcd_err_handler - Check for fatal errors - * @work: pointer to a work queue structure + * ufshcd_update_uic_error - check and set fatal UIC error flags. + * @hba: per-adapter instance */ -static void ufshcd_err_handler(struct ufs_hba *hba) +static void ufshcd_update_uic_error(struct ufs_hba *hba) { u32 reg; + /* PA_INIT_ERROR is fatal and needs UIC reset */ + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); + if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) + hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + + /* UIC NL/TL/DME errors need software retry */ + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); + if (reg) + hba->uic_error |= UFSHCD_UIC_NL_ERROR; + + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); + if (reg) + hba->uic_error |= UFSHCD_UIC_TL_ERROR; + + reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); + if (reg) + hba->uic_error |= UFSHCD_UIC_DME_ERROR; + + dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", + __func__, hba->uic_error); +} + +/** + * ufshcd_check_errors - Check for errors that need s/w attention + * @hba: per-adapter instance + */ +static void ufshcd_check_errors(struct ufs_hba *hba) +{ + bool queue_eh_work = false; + if (hba->errors & INT_FATAL_ERRORS) - goto fatal_eh; + queue_eh_work = true; if (hba->errors & UIC_ERROR) { - reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); - if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) - goto fatal_eh; + hba->uic_error = 0; + ufshcd_update_uic_error(hba); + if (hba->uic_error) + queue_eh_work = true; } - return; -fatal_eh: - hba->ufshcd_state = UFSHCD_STATE_ERROR; - schedule_work(&hba->feh_workq); + + if (queue_eh_work) { + /* handle fatal errors only when link is functional */ + if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { + /* block commands from scsi mid-layer */ + scsi_block_requests(hba->host); + + /* transfer error masks to sticky bits */ + hba->saved_err |= hba->errors; + hba->saved_uic_err |= hba->uic_error; + + hba->ufshcd_state = UFSHCD_STATE_ERROR; + schedule_work(&hba->eh_work); + } + } + /* + * if (!queue_eh_work) - + * Other errors are either non-fatal where the host recovers + * itself without s/w intervention or errors that will be + * handled by the SCSI core layer.
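ufshcd_update_uic_error() above folds four per-layer UIC error registers into a single summary mask, and only the data-link PA_INIT error counts as fatal; everything else is retried in software. A condensed sketch of that mapping (the bit positions and names below are placeholders, not the UFSHCI register layout):

#include <stdint.h>

/* Placeholder per-layer summary flags, mirroring UFSHCD_UIC_*_ERROR. */
#define ERR_DL_PA_INIT  (1U << 0)
#define ERR_NL          (1U << 1)
#define ERR_TL          (1U << 2)
#define ERR_DME         (1U << 3)

/* Hypothetical stand-in for UIC_DATA_LINK_LAYER_ERROR_PA_INIT. */
#define DL_PA_INIT_BIT  (1U << 13)

static uint32_t summarize_uic_errors(uint32_t dl, uint32_t nl,
                                     uint32_t tl, uint32_t dme)
{
        uint32_t mask = 0;

        if (dl & DL_PA_INIT_BIT)        /* only PA_INIT is fatal for DL */
                mask |= ERR_DL_PA_INIT;
        if (nl)                         /* any NL/TL/DME error: software retry */
                mask |= ERR_NL;
        if (tl)
                mask |= ERR_TL;
        if (dme)
                mask |= ERR_DME;
        return mask;
}

ufshcd_check_errors() then ORs the result into the sticky saved_uic_err mask, so the error-handler work item still sees it after the interrupt status register has long been cleared.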
+ */ } /** @@ -2469,7 +2555,7 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba) tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; - wake_up_interruptible(&hba->ufshcd_tm_wait_queue); + wake_up(&hba->tm_wq); } /** @@ -2481,7 +2567,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) { hba->errors = UFSHCD_ERROR_MASK & intr_status; if (hba->errors) - ufshcd_err_handler(hba); + ufshcd_check_errors(hba); if (intr_status & UFSHCD_UIC_MASK) ufshcd_uic_cmd_compl(hba, intr_status); @@ -2519,38 +2605,58 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) return retval; } +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) +{ + int err = 0; + u32 mask = 1 << tag; + unsigned long flags; + + if (!test_bit(tag, &hba->outstanding_tasks)) + goto out; + + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + /* poll for max. 1 sec to clear door bell register by h/w */ + err = ufshcd_wait_for_register(hba, + REG_UTP_TASK_REQ_DOOR_BELL, + mask, 0, 1000, 1000); +out: + return err; +} + /** * ufshcd_issue_tm_cmd - issues task management commands to controller * @hba: per adapter instance - * @lrbp: pointer to local reference block + * @lun_id: LUN ID to which TM command is sent + * @task_id: task ID to which the TM command is applicable + * @tm_function: task management function opcode + * @tm_response: task management service response return value * - * Returns SUCCESS/FAILED + * Returns non-zero value on error, zero on success. */ -static int -ufshcd_issue_tm_cmd(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp, - u8 tm_function) +static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, + u8 tm_function, u8 *tm_response) { struct utp_task_req_desc *task_req_descp; struct utp_upiu_task_req *task_req_upiup; struct Scsi_Host *host; unsigned long flags; - int free_slot = 0; + int free_slot; int err; + int task_tag; host = hba->host; - spin_lock_irqsave(host->host_lock, flags); - - /* If task management queue is full */ - free_slot = ufshcd_get_tm_free_slot(hba); - if (free_slot >= hba->nutmrs) { - spin_unlock_irqrestore(host->host_lock, flags); - dev_err(hba->dev, "Task management queue full\n"); - err = FAILED; - goto out; - } + /* + * Get free slot, sleep if slots are unavailable. + * Even though we use wait_event() which sleeps indefinitely, + * the maximum wait time is bounded by %TM_CMD_TIMEOUT. 
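ufshcd_clear_tm_cmd() above writes the clear register and then relies on ufshcd_wait_for_register() to poll the task door-bell for up to a second. The polling idiom itself, sketched in plain C (the register is faked with a variable, and the interval_us/timeout_ms pair mimics the driver's parameters):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static volatile uint32_t doorbell;      /* stands in for the mmio register */

static bool wait_for_bits(uint32_t mask, uint32_t val,
                          long interval_us, long timeout_ms)
{
        struct timespec ts = { 0, interval_us * 1000L };
        long waited_us = 0;

        /* spin until (reg & mask) == val, like ufshcd_wait_for_register() */
        while ((doorbell & mask) != val) {
                if (waited_us >= timeout_ms * 1000L)
                        return false;   /* the driver returns -ETIMEDOUT here */
                nanosleep(&ts, NULL);
                waited_us += interval_us;
        }
        return true;
}

The call in the diff, with mask = 1 << tag, val = 0 and both remaining parameters at 1000, therefore waits up to one second for the hardware to drop the slot's door-bell bit, checking once per millisecond.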
+ */ + wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); + spin_lock_irqsave(host->host_lock, flags); task_req_descp = hba->utmrdl_base_addr; task_req_descp += free_slot; @@ -2562,18 +2668,15 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba, /* Configure task request UPIU */ task_req_upiup = (struct utp_upiu_task_req *) task_req_descp->task_req_upiu; + task_tag = hba->nutrs + free_slot; task_req_upiup->header.dword_0 = UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, - lrbp->lun, lrbp->task_tag); + lun_id, task_tag); task_req_upiup->header.dword_1 = UPIU_HEADER_DWORD(0, tm_function, 0, 0); - task_req_upiup->input_param1 = lrbp->lun; - task_req_upiup->input_param1 = - cpu_to_be32(task_req_upiup->input_param1); - task_req_upiup->input_param2 = lrbp->task_tag; - task_req_upiup->input_param2 = - cpu_to_be32(task_req_upiup->input_param2); + task_req_upiup->input_param1 = cpu_to_be32(lun_id); + task_req_upiup->input_param2 = cpu_to_be32(task_id); /* send command to the controller */ __set_bit(free_slot, &hba->outstanding_tasks); @@ -2582,91 +2685,88 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba, spin_unlock_irqrestore(host->host_lock, flags); /* wait until the task management command is completed */ - err = - wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue, - (test_bit(free_slot, - &hba->tm_condition) != 0), - 60 * HZ); + err = wait_event_timeout(hba->tm_wq, + test_bit(free_slot, &hba->tm_condition), + msecs_to_jiffies(TM_CMD_TIMEOUT)); if (!err) { - dev_err(hba->dev, - "Task management command timed-out\n"); - err = FAILED; - goto out; + dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", + __func__, tm_function); + if (ufshcd_clear_tm_cmd(hba, free_slot)) + dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", + __func__, free_slot); + err = -ETIMEDOUT; + } else { + err = ufshcd_task_req_compl(hba, free_slot, tm_response); } + clear_bit(free_slot, &hba->tm_condition); - err = ufshcd_task_req_compl(hba, free_slot); -out: + ufshcd_put_tm_slot(hba, free_slot); + wake_up(&hba->tm_tag_wq); + return err; } /** - * ufshcd_device_reset - reset device and abort all the pending commands + * ufshcd_eh_device_reset_handler - device reset handler registered to + * scsi layer.
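All multi-byte UPIU fields travel big-endian on the wire, which is what the __be32 annotations earlier in this patch make checkable: values such as lun_id and task_id are converted exactly once, with cpu_to_be32(), at the point they enter the descriptor. A small sketch of what that conversion amounts to (the two-field layout is illustrative, not the UPIU format):

#include <stdint.h>

/* Store v big-endian; this is all cpu_to_be32() does on a LE host. */
static void put_be32(uint8_t *p, uint32_t v)
{
        p[0] = v >> 24;
        p[1] = v >> 16;
        p[2] = v >> 8;
        p[3] = v;
}

struct tm_params {              /* illustrative, not struct utp_upiu_task_req */
        uint8_t input_param1[4];
        uint8_t input_param2[4];
};

static void fill_tm_params(struct tm_params *tp, uint32_t lun, uint32_t tag)
{
        put_be32(tp->input_param1, lun);        /* cpu_to_be32(lun_id) */
        put_be32(tp->input_param2, tag);        /* cpu_to_be32(task_id) */
}

Typing the struct fields __be32 rather than u32 lets sparse flag any assignment that forgets the conversion, which is how mismatches like the old double-assignment above get caught.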
* @cmd: SCSI command pointer * * Returns SUCCESS/FAILED */ -static int ufshcd_device_reset(struct scsi_cmnd *cmd) +static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct Scsi_Host *host; struct ufs_hba *hba; unsigned int tag; u32 pos; int err; + u8 resp = 0xF; + struct ufshcd_lrb *lrbp; + unsigned long flags; host = cmd->device->host; hba = shost_priv(host); tag = cmd->request->tag; - err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); - if (err == FAILED) + lrbp = &hba->lrb[tag]; + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) + err = resp; goto out; + } - for (pos = 0; pos < hba->nutrs; pos++) { - if (test_bit(pos, &hba->outstanding_reqs) && - (hba->lrb[tag].lun == hba->lrb[pos].lun)) { - - /* clear the respective UTRLCLR register bit */ - ufshcd_utrl_clear(hba, pos); - - clear_bit(pos, &hba->outstanding_reqs); - - if (hba->lrb[pos].cmd) { - scsi_dma_unmap(hba->lrb[pos].cmd); - hba->lrb[pos].cmd->result = - DID_ABORT << 16; - hba->lrb[pos].cmd->scsi_done(cmd); - hba->lrb[pos].cmd = NULL; - clear_bit_unlock(pos, &hba->lrb_in_use); - wake_up(&hba->dev_cmd.tag_wq); - } + /* clear the commands that were pending for corresponding LUN */ + for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { + if (hba->lrb[pos].lun == lrbp->lun) { + err = ufshcd_clear_cmd(hba, pos); + if (err) + break; } - } /* end of for */ + } + spin_lock_irqsave(host->host_lock, flags); + ufshcd_transfer_req_compl(hba); + spin_unlock_irqrestore(host->host_lock, flags); out: + if (!err) { + err = SUCCESS; + } else { + dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); + err = FAILED; + } return err; } /** - * ufshcd_host_reset - Main reset function registered with scsi layer - * @cmd: SCSI command pointer - * - * Returns SUCCESS/FAILED - */ -static int ufshcd_host_reset(struct scsi_cmnd *cmd) -{ - struct ufs_hba *hba; - - hba = shost_priv(cmd->device->host); - - if (hba->ufshcd_state == UFSHCD_STATE_RESET) - return SUCCESS; - - return ufshcd_do_reset(hba); -} - -/** * ufshcd_abort - abort a specific command * @cmd: SCSI command pointer * + * Abort the pending command in the device by sending the UFS_ABORT_TASK task + * management command, and in the host controller by clearing the corresponding + * door-bell register. There can be a race between the controller sending the + * command to the device and the abort being issued. To avoid that, first issue + * UFS_QUERY_TASK to check whether the command was actually issued, and only + * then try to abort it.
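The query-then-abort protocol described above reads naturally as a small state machine: SUCCEEDED means the command is parked in the device and must be aborted, COMPL with the door-bell bit still set means it is in transition, and COMPL with the bit clear means it already finished. A sketch of that decision loop; query_task(), doorbell_set() and short_sleep() are hypothetical stand-ins for the TM command, the register test and usleep_range():

#include <stdbool.h>

enum tm_resp { TM_FUNC_COMPL, TM_FUNC_SUCCEEDED, TM_FAILED };

extern enum tm_resp query_task(int tag);        /* UFS_QUERY_TASK stand-in */
extern bool doorbell_set(int tag);              /* door-bell register test */
extern void short_sleep(void);                  /* usleep_range(100, 200) */

/* Returns 0 with *abort decided, or -1 on error / exhausted retries. */
static int needs_abort(int tag, bool *abort)
{
        int poll_cnt;

        for (poll_cnt = 100; poll_cnt; poll_cnt--) {
                switch (query_task(tag)) {
                case TM_FUNC_SUCCEEDED:
                        *abort = true;          /* pending in the device */
                        return 0;
                case TM_FUNC_COMPL:
                        if (!doorbell_set(tag)) {
                                *abort = false; /* completed already */
                                return 0;
                        }
                        short_sleep();          /* still in transition */
                        break;
                default:
                        return -1;              /* service response error */
                }
        }
        return -1;                              /* gave up, like the -EBUSY path */
}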
+ * * Returns SUCCESS/FAILED */ static int ufshcd_abort(struct scsi_cmnd *cmd) @@ -2675,33 +2775,68 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) struct ufs_hba *hba; unsigned long flags; unsigned int tag; - int err; + int err = 0; + int poll_cnt; + u8 resp = 0xF; + struct ufshcd_lrb *lrbp; host = cmd->device->host; hba = shost_priv(host); tag = cmd->request->tag; - spin_lock_irqsave(host->host_lock, flags); + /* If command is already aborted/completed, return SUCCESS */ + if (!(test_bit(tag, &hba->outstanding_reqs))) + goto out; - /* check if command is still pending */ - if (!(test_bit(tag, &hba->outstanding_reqs))) { - err = FAILED; - spin_unlock_irqrestore(host->host_lock, flags); + lrbp = &hba->lrb[tag]; + for (poll_cnt = 100; poll_cnt; poll_cnt--) { + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, + UFS_QUERY_TASK, &resp); + if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { + /* cmd pending in the device */ + break; + } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + u32 reg; + + /* + * cmd not pending in the device, check if it is + * in transition. + */ + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + if (reg & (1 << tag)) { + /* sleep for max. 200us to stabilize */ + usleep_range(100, 200); + continue; + } + /* command completed already */ + goto out; + } else { + if (!err) + err = resp; /* service response error */ + goto out; + } + } + + if (!poll_cnt) { + err = -EBUSY; goto out; } - spin_unlock_irqrestore(host->host_lock, flags); - err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); - if (err == FAILED) + err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, + UFS_ABORT_TASK, &resp); + if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { + if (!err) + err = resp; /* service response error */ + goto out; + } + + err = ufshcd_clear_cmd(hba, tag); + if (err) goto out; scsi_dma_unmap(cmd); spin_lock_irqsave(host->host_lock, flags); - - /* clear the respective UTRLCLR register bit */ - ufshcd_utrl_clear(hba, tag); - __clear_bit(tag, &hba->outstanding_reqs); hba->lrb[tag].cmd = NULL; spin_unlock_irqrestore(host->host_lock, flags); @@ -2709,6 +2844,129 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) clear_bit_unlock(tag, &hba->lrb_in_use); wake_up(&hba->dev_cmd.tag_wq); out: + if (!err) { + err = SUCCESS; + } else { + dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); + err = FAILED; + } + + return err; +} + +/** + * ufshcd_host_reset_and_restore - reset and restore host controller + * @hba: per-adapter instance + * + * Note that host controller reset may issue DME_RESET to + * local and remote (device) Uni-Pro stack and the attributes + * are reset to default state. 
+ * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) +{ + int err; + async_cookie_t cookie; + unsigned long flags; + + /* Reset the host controller */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_hba_stop(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + err = ufshcd_hba_enable(hba); + if (err) + goto out; + + /* Establish the link again and restore the device */ + cookie = async_schedule(ufshcd_async_scan, hba); + /* wait for async scan to be completed */ + async_synchronize_cookie(++cookie); + if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) + err = -EIO; +out: + if (err) + dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); + + return err; +} + +/** + * ufshcd_reset_and_restore - reset and re-initialize host/device + * @hba: per-adapter instance + * + * Reset and recover device, host and re-establish link. This + * is helpful to recover the communication in fatal error conditions. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_reset_and_restore(struct ufs_hba *hba) +{ + int err = 0; + unsigned long flags; + + err = ufshcd_host_reset_and_restore(hba); + + /* + * After reset the door-bell might be cleared, complete + * outstanding requests in s/w here. + */ + spin_lock_irqsave(hba->host->host_lock, flags); + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + return err; +} + +/** + * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer + * @cmd - SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ + int err; + unsigned long flags; + struct ufs_hba *hba; + + hba = shost_priv(cmd->device->host); + + /* + * Check if there is any race with fatal error handling. + * If so, wait for it to complete. Even though fatal error + * handling does reset and restore in some cases, don't assume + * anything out of it. We are just avoiding race here. 
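One detail in ufshcd_host_reset_and_restore() above is easy to miss: async_synchronize_cookie(c) waits only for async work scheduled strictly before cookie c, which is why the code bumps the cookie returned by async_schedule() before synchronizing. A minimal kernel-style illustration of the same idiom (probe_fn is hypothetical):

#include <linux/async.h>

static void probe_fn(void *data, async_cookie_t cookie)
{
        /* long-running initialization would run here */
}

static void run_and_wait(void *data)
{
        async_cookie_t cookie = async_schedule(probe_fn, data);

        /* +1 so the wait covers probe_fn itself, not just earlier entries */
        async_synchronize_cookie(cookie + 1);
}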
+ */ + do { + spin_lock_irqsave(hba->host->host_lock, flags); + if (!(work_pending(&hba->eh_work) || + hba->ufshcd_state == UFSHCD_STATE_RESET)) + break; + spin_unlock_irqrestore(hba->host->host_lock, flags); + dev_dbg(hba->dev, "%s: reset in progress\n", __func__); + flush_work(&hba->eh_work); + } while (1); + + hba->ufshcd_state = UFSHCD_STATE_RESET; + ufshcd_set_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + err = ufshcd_reset_and_restore(hba); + + spin_lock_irqsave(hba->host->host_lock, flags); + if (!err) { + err = SUCCESS; + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + } else { + err = FAILED; + hba->ufshcd_state = UFSHCD_STATE_ERROR; + } + ufshcd_clear_eh_in_progress(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + return err; } @@ -2737,8 +2995,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) goto out; ufshcd_force_reset_auto_bkops(hba); - scsi_scan_host(hba->host); - pm_runtime_put_sync(hba->dev); + hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + + /* If we are in error handling context, no need to scan the host */ + if (!ufshcd_eh_in_progress(hba)) { + scsi_scan_host(hba->host); + pm_runtime_put_sync(hba->dev); + } out: return; } @@ -2751,8 +3014,8 @@ static struct scsi_host_template ufshcd_driver_template = { .slave_alloc = ufshcd_slave_alloc, .slave_destroy = ufshcd_slave_destroy, .eh_abort_handler = ufshcd_abort, - .eh_device_reset_handler = ufshcd_device_reset, - .eh_host_reset_handler = ufshcd_host_reset, + .eh_device_reset_handler = ufshcd_eh_device_reset_handler, + .eh_host_reset_handler = ufshcd_eh_host_reset_handler, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, @@ -2916,10 +3179,11 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, host->max_cmd_len = MAX_CDB_SIZE; /* Initialize wait queue for task management */ - init_waitqueue_head(&hba->ufshcd_tm_wait_queue); + init_waitqueue_head(&hba->tm_wq); + init_waitqueue_head(&hba->tm_tag_wq); /* Initialize work queues */ - INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); + INIT_WORK(&hba->eh_work, ufshcd_err_handler); INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); /* Initialize UIC command mutex */ diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 577679a2d189..acf318e338ed 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -174,15 +174,21 @@ struct ufs_dev_cmd { * @irq: Irq number of the controller * @active_uic_cmd: handle of active UIC command * @uic_cmd_mutex: mutex for uic command - * @ufshcd_tm_wait_queue: wait queue for task management + * @tm_wq: wait queue for task management + * @tm_tag_wq: wait queue for free task management slots + * @tm_slots_in_use: bit map of task management request slots in use * @pwr_done: completion for power mode change * @tm_condition: condition variable for task management * @ufshcd_state: UFSHCD states + * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits * @ee_ctrl_mask: Exception event control mask - * @feh_workq: Work queue for fatal controller error handling + * @eh_work: Worker to handle UFS errors that require s/w attention * @eeh_work: Worker to handle exception events * @errors: HBA errors + * @uic_error: UFS interconnect layer error status + * @saved_err: sticky error mask + * @saved_uic_err: sticky UIC error mask * @dev_cmd: ufs device management command information * @auto_bkops_enabled: to track whether bkops is enabled in device */ @@ -217,21 +223,27 @@ struct ufs_hba { struct
uic_command *active_uic_cmd; struct mutex uic_cmd_mutex; - wait_queue_head_t ufshcd_tm_wait_queue; + wait_queue_head_t tm_wq; + wait_queue_head_t tm_tag_wq; unsigned long tm_condition; + unsigned long tm_slots_in_use; struct completion *pwr_done; u32 ufshcd_state; + u32 eh_flags; u32 intr_mask; u16 ee_ctrl_mask; /* Work Queues */ - struct work_struct feh_workq; + struct work_struct eh_work; struct work_struct eeh_work; /* HBA Errors */ u32 errors; + u32 uic_error; + u32 saved_err; + u32 saved_uic_err; /* Device management request data */ struct ufs_dev_cmd dev_cmd; @@ -263,6 +275,8 @@ static inline void check_upiu_size(void) GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); } +extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state); +extern int ufshcd_resume(struct ufs_hba *hba); extern int ufshcd_runtime_suspend(struct ufs_hba *hba); extern int ufshcd_runtime_resume(struct ufs_hba *hba); extern int ufshcd_runtime_idle(struct ufs_hba *hba); diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 0475c6619a68..9abc7e32b43d 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -304,10 +304,10 @@ enum { * @size: size of physical segment DW-3 */ struct ufshcd_sg_entry { - u32 base_addr; - u32 upper_addr; - u32 reserved; - u32 size; + __le32 base_addr; + __le32 upper_addr; + __le32 reserved; + __le32 size; }; /** @@ -330,10 +330,10 @@ struct utp_transfer_cmd_desc { * @dword3: Descriptor Header DW3 */ struct request_desc_header { - u32 dword_0; - u32 dword_1; - u32 dword_2; - u32 dword_3; + __le32 dword_0; + __le32 dword_1; + __le32 dword_2; + __le32 dword_3; }; /** @@ -352,16 +352,16 @@ struct utp_transfer_req_desc { struct request_desc_header header; /* DW 4-5*/ - u32 command_desc_base_addr_lo; - u32 command_desc_base_addr_hi; + __le32 command_desc_base_addr_lo; + __le32 command_desc_base_addr_hi; /* DW 6 */ - u16 response_upiu_length; - u16 response_upiu_offset; + __le16 response_upiu_length; + __le16 response_upiu_offset; /* DW 7 */ - u16 prd_table_length; - u16 prd_table_offset; + __le16 prd_table_length; + __le16 prd_table_offset; }; /** @@ -376,10 +376,10 @@ struct utp_task_req_desc { struct request_desc_header header; /* DW 4-11 */ - u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; + __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; /* DW 12-19 */ - u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; + __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; }; #endif /* End of Header */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 16bfd50cd3fe..308256b5e4cb 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -23,6 +23,7 @@ #include <linux/virtio_config.h> #include <linux/virtio_scsi.h> #include <linux/cpu.h> +#include <linux/blkdev.h> #include <scsi/scsi_host.h> #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> @@ -37,6 +38,7 @@ struct virtio_scsi_cmd { struct completion *comp; union { struct virtio_scsi_cmd_req cmd; + struct virtio_scsi_cmd_req_pi cmd_pi; struct virtio_scsi_ctrl_tmf_req tmf; struct virtio_scsi_ctrl_an_req an; } req; @@ -73,17 +75,12 @@ struct virtio_scsi_vq { * queue, and also lets the driver optimize the IRQ affinity for the virtqueues * (each virtqueue's affinity is set to the CPU that "owns" the queue). * - * An interesting effect of this policy is that only writes to req_vq need to - * take the tgt_lock. Read can be done outside the lock because: + * tgt_lock is held to serialize reading and writing req_vq. 
Reading req_vq + * could be done locklessly, but we do not do it yet. * - * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1. - * In that case, no other CPU is reading req_vq: even if they were in - * virtscsi_queuecommand_multi, they would be spinning on tgt_lock. - * - * - reads of req_vq only occur when the target is not idle (reqs != 0). - * A CPU that enters virtscsi_queuecommand_multi will not modify req_vq. - * - * Similarly, decrements of reqs are never concurrent with writes of req_vq. + * Decrements of reqs are never concurrent with writes of req_vq: before the + * decrement reqs will be != 0; after the decrement the virtqueue completion + * routine will not use the req_vq so it can be changed by a new request. * Thus they can happen outside the tgt_lock, provided of course we make reqs * an atomic_t. */ @@ -204,7 +201,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) set_driver_byte(sc, DRIVER_SENSE); } - mempool_free(cmd, virtscsi_cmd_pool); sc->scsi_done(sc); atomic_dec(&tgt->reqs); @@ -238,49 +234,25 @@ static void virtscsi_req_done(struct virtqueue *vq) int index = vq->index - VIRTIO_SCSI_VQ_BASE; struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; - /* - * Read req_vq before decrementing the reqs field in - * virtscsi_complete_cmd. - * - * With barriers: - * - * CPU #0 virtscsi_queuecommand_multi (CPU #1) - * ------------------------------------------------------------ - * lock vq_lock - * read req_vq - * read reqs (reqs = 1) - * write reqs (reqs = 0) - * increment reqs (reqs = 1) - * write req_vq - * - * Possible reordering without barriers: - * - * CPU #0 virtscsi_queuecommand_multi (CPU #1) - * ------------------------------------------------------------ - * lock vq_lock - * read reqs (reqs = 1) - * write reqs (reqs = 0) - * increment reqs (reqs = 1) - * write req_vq - * read (wrong) req_vq - * - * We do not need a full smp_rmb, because req_vq is required to get - * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored - * in the virtqueue as the user token. 
- */ - smp_read_barrier_depends(); - virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); }; +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; if (cmd->comp) complete_all(cmd->comp); - else - mempool_free(cmd, virtscsi_cmd_pool); } static void virtscsi_ctrl_done(struct virtqueue *vq) @@ -291,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq) virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); }; +static void virtscsi_handle_event(struct work_struct *work); + static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) { @@ -298,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct scatterlist sg; unsigned long flags; + INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); @@ -415,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_event_node *event_node = buf; - INIT_WORK(&event_node->work, virtscsi_handle_event); schedule_work(&event_node->work); } @@ -433,14 +407,13 @@ static void virtscsi_event_done(struct virtqueue *vq) * @cmd : command structure * @req_size : size of the request buffer * @resp_size : size of the response buffer - * @gfp : flags to use for memory allocations */ static int virtscsi_add_cmd(struct virtqueue *vq, struct virtio_scsi_cmd *cmd, - size_t req_size, size_t resp_size, gfp_t gfp) + size_t req_size, size_t resp_size) { struct scsi_cmnd *sc = cmd->sc; - struct scatterlist *sgs[4], req, resp; + struct scatterlist *sgs[6], req, resp; struct sg_table *out, *in; unsigned out_num = 0, in_num = 0; @@ -458,30 +431,38 @@ static int virtscsi_add_cmd(struct virtqueue *vq, sgs[out_num++] = &req; /* Data-out buffer. */ - if (out) + if (out) { + /* Place WRITE protection SGLs before Data OUT payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num++] = scsi_prot_sglist(sc); sgs[out_num++] = out->sgl; + } /* Response header. 
*/ sg_init_one(&resp, &cmd->resp, resp_size); sgs[out_num + in_num++] = &resp; /* Data-in buffer */ - if (in) + if (in) { + /* Place READ protection SGLs before Data IN payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num + in_num++] = scsi_prot_sglist(sc); sgs[out_num + in_num++] = in->sgl; + } - return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp); + return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); } static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, struct virtio_scsi_cmd *cmd, - size_t req_size, size_t resp_size, gfp_t gfp) + size_t req_size, size_t resp_size) { unsigned long flags; int err; bool needs_kick = false; spin_lock_irqsave(&vq->vq_lock, flags); - err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp); + err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); if (!err) needs_kick = virtqueue_kick_prepare(vq->vq); @@ -492,14 +473,46 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, return err; } +static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd, + struct scsi_cmnd *sc) +{ + cmd->lun[0] = 1; + cmd->lun[1] = sc->device->id; + cmd->lun[2] = (sc->device->lun >> 8) | 0x40; + cmd->lun[3] = sc->device->lun & 0xff; + cmd->tag = (unsigned long)sc; + cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; + cmd->prio = 0; + cmd->crn = 0; +} + +static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi, + struct scsi_cmnd *sc) +{ + struct request *rq = sc->request; + struct blk_integrity *bi; + + virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc); + + if (!rq || !scsi_prot_sg_count(sc)) + return; + + bi = blk_get_integrity(rq->rq_disk); + + if (sc->sc_data_direction == DMA_TO_DEVICE) + cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size; + else if (sc->sc_data_direction == DMA_FROM_DEVICE) + cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size; +} + static int virtscsi_queuecommand(struct virtio_scsi *vscsi, struct virtio_scsi_vq *req_vq, struct scsi_cmnd *sc) { - struct virtio_scsi_cmd *cmd; - int ret; - struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + int req_size; + BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); /* TODO: check feature bit and fail if unsupported? 
*/ @@ -508,36 +521,24 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, dev_dbg(&sc->device->sdev_gendev, "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); - ret = SCSI_MLQUEUE_HOST_BUSY; - cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC); - if (!cmd) - goto out; - memset(cmd, 0, sizeof(*cmd)); cmd->sc = sc; - cmd->req.cmd = (struct virtio_scsi_cmd_req){ - .lun[0] = 1, - .lun[1] = sc->device->id, - .lun[2] = (sc->device->lun >> 8) | 0x40, - .lun[3] = sc->device->lun & 0xff, - .tag = (unsigned long)sc, - .task_attr = VIRTIO_SCSI_S_SIMPLE, - .prio = 0, - .crn = 0, - }; BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); - memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); - if (virtscsi_kick_cmd(req_vq, cmd, - sizeof cmd->req.cmd, sizeof cmd->resp.cmd, - GFP_ATOMIC) == 0) - ret = 0; - else - mempool_free(cmd, virtscsi_cmd_pool); + if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { + virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc); + memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd_pi); + } else { + virtio_scsi_init_hdr(&cmd->req.cmd, sc); + memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd); + } -out: - return ret; + if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) + return SCSI_MLQUEUE_HOST_BUSY; + return 0; } static int virtscsi_queuecommand_single(struct Scsi_Host *sh, @@ -560,12 +561,8 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, spin_lock_irqsave(&tgt->tgt_lock, flags); - /* - * The memory barrier after atomic_inc_return matches - * the smp_read_barrier_depends() in virtscsi_req_done. - */ if (atomic_inc_return(&tgt->reqs) > 1) - vq = ACCESS_ONCE(tgt->req_vq); + vq = tgt->req_vq; else { queue_num = smp_processor_id(); while (unlikely(queue_num >= vscsi->num_queues)) @@ -596,8 +593,7 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) cmd->comp = ∁ if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, - sizeof cmd->req.tmf, sizeof cmd->resp.tmf, - GFP_NOIO) < 0) + sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0) goto out; wait_for_completion(&comp); @@ -605,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) ret = SUCCESS; + /* + * The spec guarantees that all requests related to the TMF have + * been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once. + * + * In the abort case, sc->scsi_done will do nothing, because + * the block layer must have detected a timeout and as a result + * REQ_ATOM_COMPLETE has been set. 
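A few hunks up, virtio_scsi_init_hdr_pi() sizes the protection payload as blk_rq_sectors(rq) * bi->tuple_size: one integrity tuple per 512-byte sector, charged to pi_bytesout or pi_bytesin depending on the data direction. Spelled out as a sketch (the 8-byte tuple used in the example below is the usual T10 DIF size, assumed here for illustration):

#include <stdint.h>

enum dir { DIR_TO_DEV, DIR_FROM_DEV };

struct pi_sizes { uint32_t bytesout, bytesin; };

/* blk_rq_sectors(rq) * bi->tuple_size, charged by direction */
static struct pi_sizes pi_bytes(uint32_t nr_512b_sectors,
                                uint32_t tuple_size, enum dir d)
{
        struct pi_sizes s = { 0, 0 };
        uint32_t total = nr_512b_sectors * tuple_size;

        if (d == DIR_TO_DEV)
                s.bytesout = total;
        else
                s.bytesin = total;
        return s;
}

For a 4 KiB write that is eight sectors, so 8 * 8 = 64 bytes of protection data travel in the scatterlist ahead of the data-out payload, matching the "Place WRITE protection SGLs before Data OUT payload" comment above.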
+ */ + virtscsi_poll_requests(vscsi); + out: mempool_free(cmd, virtscsi_cmd_pool); return ret; @@ -683,6 +691,7 @@ static struct scsi_host_template virtscsi_host_template_single = { .name = "Virtio SCSI HBA", .proc_name = "virtio_scsi", .this_id = -1, + .cmd_size = sizeof(struct virtio_scsi_cmd), .queuecommand = virtscsi_queuecommand_single, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, @@ -699,6 +708,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .name = "Virtio SCSI HBA", .proc_name = "virtio_scsi", .this_id = -1, + .cmd_size = sizeof(struct virtio_scsi_cmd), .queuecommand = virtscsi_queuecommand_multi, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, @@ -750,8 +760,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) vscsi->affinity_hint_set = true; } else { - for (i = 0; i < vscsi->num_queues; i++) + for (i = 0; i < vscsi->num_queues; i++) { + if (!vscsi->req_vqs[i].vq) + continue; + virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); + } vscsi->affinity_hint_set = false; } @@ -871,7 +885,7 @@ static int virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; - int err; + int err, host_prot; u32 sg_elems, num_targets; u32 cmd_per_lun; u32 num_queues; @@ -921,6 +935,16 @@ static int virtscsi_probe(struct virtio_device *vdev) shost->max_id = num_targets; shost->max_channel = 0; shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(shost, host_prot); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + } + err = scsi_add_host(shost, &vdev->dev); if (err) goto scsi_add_host_failed; @@ -990,6 +1014,7 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_SCSI_F_HOTPLUG, VIRTIO_SCSI_F_CHANGE, + VIRTIO_SCSI_F_T10_PI, }; static struct virtio_driver virtio_scsi_driver = {
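Finally, the new .cmd_size fields explain why the mempool_alloc()/mempool_free() pair disappeared from the I/O path earlier in this file: once cmd_size is set, the SCSI midlayer allocates the driver's per-command state together with each scsi_cmnd, and scsi_cmd_priv() reduces to pointer arithmetic past the command. A toy rendering of that layout (both struct names are stand-ins):

#include <stdlib.h>

struct scsi_cmnd_like { int midlayer_state; };  /* ~ struct scsi_cmnd */
struct drv_cmd { int tag; };                    /* ~ struct virtio_scsi_cmd */

/* what scsi_cmd_priv() effectively computes */
static void *cmd_priv(struct scsi_cmnd_like *cmd)
{
        return cmd + 1;
}

/* the midlayer allocates both objects in one block when cmd_size is set */
static struct scsi_cmnd_like *alloc_cmd(size_t cmd_size)
{
        return malloc(sizeof(struct scsi_cmnd_like) + cmd_size);
}

With the private area living and dying with the command, the TMF path above is the only caller left that still frees into virtscsi_cmd_pool.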