Diffstat (limited to 'drivers/scsi')
102 files changed, 34033 insertions, 6516 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index fb2740789b68..6a19ed9a1194 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
 	  it has an enclosure device. Selecting this option will just allow
 	  certain enclosure conditions to be reported and is not required.
 
-comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
-	depends on SCSI
-
 config SCSI_MULTI_LUN
 	bool "Probe all LUNs on each SCSI device"
 	depends on SCSI
 	help
-	  If you have a SCSI device that supports more than one LUN (Logical
-	  Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
-	  can say Y here to force the SCSI driver to probe for multiple LUNs.
-	  A SCSI device with multiple LUNs acts logically like multiple SCSI
-	  devices. The vast majority of SCSI devices have only one LUN, and
-	  so most people can say N here. The max_luns boot/module parameter
-	  allows to override this setting.
+	  Some devices support more than one LUN (Logical Unit Number) in
+	  order to allow access to several media, e.g. CD jukebox, USB card
+	  reader, mobile phone in mass storage mode. This option forces the
+	  kernel to probe for all LUNs by default. This setting can be
+	  overridden by the max_luns boot/module parameter. Note that this
+	  option does not affect devices conforming to SCSI-3 or higher, as
+	  they can explicitly report their number of LUNs. It is safe to say
+	  Y here unless you have one of those rare devices which reacts in
+	  an unexpected way when probed for multiple LUNs.
 
 config SCSI_CONSTANTS
 	bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
 	  http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
 
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
 
 config SCSI_DPT_I2O
 	tristate "Adaptec I2O RAID support "
@@ -1050,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
 
 	  Generally, saying N is fine.
 
-config SCSI_MVSAS
-	tristate "Marvell 88SE6440 SAS/SATA support"
-	depends on PCI && SCSI
-	select SCSI_SAS_LIBSAS
-	help
-	  This driver supports Marvell SAS/SATA PCI devices.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called mvsas.
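The max_luns override mentioned in the new help text is an ordinary scsi_mod module parameter. As a minimal sketch of how such a Kconfig default and a boot/module parameter interact (modelled loosely on drivers/scsi/scsi_scan.c; the names and the 512 default here are illustrative, not quoted from this patch):

#include <linux/module.h>
#include <linux/moduleparam.h>

#ifdef CONFIG_SCSI_MULTI_LUN
#define MAX_SCSI_LUNS	512	/* probe every LUN by default */
#else
#define MAX_SCSI_LUNS	1	/* probe only LUN 0 by default */
#endif

static unsigned int max_scsi_luns = MAX_SCSI_LUNS;

/* Overridable at boot time ("scsi_mod.max_luns=8" on the command line)
 * or at module load time ("modprobe scsi_mod max_luns=8"). */
module_param_named(max_luns, max_scsi_luns, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_luns, "last SCSI LUN to probe");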
- config SCSI_NCR53C406A tristate "NCR53c406a SCSI support" depends on ISA && SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index a5049cfb40ed..25429ea63d0a 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -126,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o obj-$(CONFIG_SCSI_STEX) += stex.o -obj-$(CONFIG_SCSI_MVSAS) += mvsas.o +obj-$(CONFIG_SCSI_MVSAS) += mvsas/ obj-$(CONFIG_PS3_ROM) += ps3rom.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ +obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ obj-$(CONFIG_ARM) += arm/ diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c index c889d8458684..1cdf09a4779a 100644 --- a/drivers/scsi/NCR_D700.c +++ b/drivers/scsi/NCR_D700.c @@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, return ret; } -static int +static irqreturn_t NCR_D700_intr(int irq, void *data) { struct NCR_D700_private *p = (struct NCR_D700_private *)data; diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h new file mode 100644 index 000000000000..2fceb19eb27b --- /dev/null +++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h @@ -0,0 +1,155 @@ +/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ +#ifndef __57XX_ISCSI_CONSTANTS_H_ +#define __57XX_ISCSI_CONSTANTS_H_ + +/** +* This file defines HSI constants for the iSCSI flows +*/ + +/* iSCSI request op codes */ +#define ISCSI_OPCODE_CLEANUP_REQUEST (7) + +/* iSCSI response/messages op codes */ +#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27) +#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0) + +/* iSCSI task types */ +#define ISCSI_TASK_TYPE_READ (0) +#define ISCSI_TASK_TYPE_WRITE (1) +#define ISCSI_TASK_TYPE_MPATH (2) + +/* initial CQ sequence numbers */ +#define ISCSI_INITIAL_SN (1) + +/* KWQ (kernel work queue) layer codes */ +#define ISCSI_KWQE_LAYER_CODE (6) + +/* KWQ (kernel work queue) request op codes */ +#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0) +#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1) +#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2) +#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3) +#define ISCSI_KWQE_OPCODE_INIT1 (4) +#define ISCSI_KWQE_OPCODE_INIT2 (5) + +/* KCQ (kernel completion queue) response op codes */ +#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10) +#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12) +#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13) +#define ISCSI_KCQE_OPCODE_INIT (0x14) +#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15) +#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16) +#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17) +#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18) +#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19) +#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) +#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21) + +/* KCQ (kernel completion queue) completion status */ +#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0) +#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1) +#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2) +#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3) +#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR (0x4) + +#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5) +#define 
ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6) + +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe) + +/* Response */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17) + +/* Data-In */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d) + +/* R2T */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27) + +/* TMF */ +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a) +#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b) + +/* IP/TCP processing errors: */ +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42) +#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43) + +/* iSCSI licensing errors */ +/* general iSCSI license not installed */ +#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50) +/* additional LOM specific iSCSI license not installed */ +#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51) + +/* SQ/RQ/CQ DB structure sizes */ +#define ISCSI_SQ_DB_SIZE (16) +#define ISCSI_RQ_DB_SIZE (16) +#define ISCSI_CQ_DB_SIZE (80) + +#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF + +/* Page size codes (for flags field in connection offload request) */ +#define ISCSI_PAGE_SIZE_256 (0) +#define ISCSI_PAGE_SIZE_512 (1) +#define ISCSI_PAGE_SIZE_1K (2) +#define ISCSI_PAGE_SIZE_2K (3) +#define ISCSI_PAGE_SIZE_4K (4) +#define 
ISCSI_PAGE_SIZE_8K (5) +#define ISCSI_PAGE_SIZE_16K (6) +#define ISCSI_PAGE_SIZE_32K (7) +#define ISCSI_PAGE_SIZE_64K (8) +#define ISCSI_PAGE_SIZE_128K (9) +#define ISCSI_PAGE_SIZE_256K (10) +#define ISCSI_PAGE_SIZE_512K (11) +#define ISCSI_PAGE_SIZE_1M (12) +#define ISCSI_PAGE_SIZE_2M (13) +#define ISCSI_PAGE_SIZE_4M (14) +#define ISCSI_PAGE_SIZE_8M (15) + +/* Iscsi PDU related defines */ +#define ISCSI_HEADER_SIZE (48) +#define ISCSI_DIGEST_SHIFT (2) +#define ISCSI_DIGEST_SIZE (4) + +#define B577XX_ISCSI_CONNECTION_TYPE 3 + +#endif /*__57XX_ISCSI_CONSTANTS_H_ */ diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h new file mode 100644 index 000000000000..36af1afef9b6 --- /dev/null +++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h @@ -0,0 +1,1509 @@ +/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ +#ifndef __57XX_ISCSI_HSI_LINUX_LE__ +#define __57XX_ISCSI_HSI_LINUX_LE__ + +/* + * iSCSI Async CQE + */ +struct bnx2i_async_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u8 async_event; + u8 async_vcode; + u16 param1; +#elif defined(__LITTLE_ENDIAN) + u16 param1; + u8 async_vcode; + u8 async_event; +#endif +#if defined(__BIG_ENDIAN) + u16 param2; + u16 param3; +#elif defined(__LITTLE_ENDIAN) + u16 param3; + u16 param2; +#endif + u32 reserved7[3]; + u32 cq_req_sn; +}; + + +/* + * iSCSI Buffer Descriptor (BD) + */ +struct iscsi_bd { + u32 buffer_addr_hi; + u32 buffer_addr_lo; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 buffer_length; +#elif defined(__LITTLE_ENDIAN) + u16 buffer_length; + u16 reserved0; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 flags; +#define ISCSI_BD_RESERVED1 (0x3F<<0) +#define ISCSI_BD_RESERVED1_SHIFT 0 +#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) +#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 +#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) +#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 +#define ISCSI_BD_RESERVED2 (0xFF<<8) +#define ISCSI_BD_RESERVED2_SHIFT 8 +#elif defined(__LITTLE_ENDIAN) + u16 flags; +#define ISCSI_BD_RESERVED1 (0x3F<<0) +#define ISCSI_BD_RESERVED1_SHIFT 0 +#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) +#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 +#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) +#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 +#define ISCSI_BD_RESERVED2 (0xFF<<8) +#define ISCSI_BD_RESERVED2_SHIFT 8 + u16 reserved3; +#endif +}; + + +/* + * iSCSI Cleanup SQ WQE + */ +struct bnx2i_cleanup_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2[3]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 itt; +#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) +#define 
ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) +#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 + u16 reserved3; +#endif + u32 reserved4[10]; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u16 reserved5; +#elif defined(__LITTLE_ENDIAN) + u16 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Cleanup CQE + */ +struct bnx2i_cleanup_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 status; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 status; + u8 op_code; +#endif + u32 reserved1[3]; + u32 reserved2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[7]; +#if defined(__BIG_ENDIAN) + u16 reserved6; + u16 itt; +#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 + u16 reserved6; +#endif + u32 cq_req_sn; +}; + + +/* + * SCSI read/write SQ WQE + */ +struct bnx2i_cmd_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) +#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 +#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) +#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 +#define ISCSI_CMD_REQUEST_WRITE (0x1<<5) +#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 +#define ISCSI_CMD_REQUEST_READ (0x1<<6) +#define ISCSI_CMD_REQUEST_READ_SHIFT 6 +#define ISCSI_CMD_REQUEST_FINAL (0x1<<7) +#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) +#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 +#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) +#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 +#define ISCSI_CMD_REQUEST_WRITE (0x1<<5) +#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 +#define ISCSI_CMD_REQUEST_READ (0x1<<6) +#define ISCSI_CMD_REQUEST_READ_SHIFT 6 +#define ISCSI_CMD_REQUEST_FINAL (0x1<<7) +#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 + u8 op_code; +#endif +#if defined(__BIG_ENDIAN) + u16 ud_buffer_offset; + u16 sd_buffer_offset; +#elif defined(__LITTLE_ENDIAN) + u16 sd_buffer_offset; + u16 ud_buffer_offset; +#endif + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CMD_REQUEST_TYPE (0x3<<14) +#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 +#define ISCSI_CMD_REQUEST_TYPE (0x3<<14) +#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif + u32 total_data_transfer_length; + u32 cmd_sn; + u32 reserved3; + u32 cdb[4]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 sd_start_bd_index; + u8 ud_start_bd_index; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 ud_start_bd_index; + u8 sd_start_bd_index; + u8 cq_index; +#endif +}; + + +/* + * task 
statistics for write response + */ +struct bnx2i_write_resp_task_stat { + u32 num_data_ins; +}; + +/* + * task statistics for read response + */ +struct bnx2i_read_resp_task_stat { +#if defined(__BIG_ENDIAN) + u16 num_data_outs; + u16 num_r2ts; +#elif defined(__LITTLE_ENDIAN) + u16 num_r2ts; + u16 num_data_outs; +#endif +}; + +/* + * task statistics for iSCSI cmd response + */ +union bnx2i_cmd_resp_task_stat { + struct bnx2i_write_resp_task_stat write_stat; + struct bnx2i_read_resp_task_stat read_stat; +}; + +/* + * SCSI Command CQE + */ +struct bnx2i_cmd_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) +#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 +#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) +#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 + u8 response; + u8 status; +#elif defined(__LITTLE_ENDIAN) + u8 status; + u8 response; + u8 response_flags; +#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) +#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) +#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) +#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) +#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 +#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) +#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved2; + u32 residual_count; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[5]; + union bnx2i_cmd_resp_task_stat task_stat; + u32 reserved6; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) +#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + + + +/* + * firmware middle-path request SQ WQE + */ +struct bnx2i_fw_mp_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; + u16 hdr_opaque1; +#elif defined(__LITTLE_ENDIAN) + u16 hdr_opaque1; + u8 op_attr; + u8 op_code; +#endif + u32 data_length; + u32 hdr_opaque2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 itt; +#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) +#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 +#define 
ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) +#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 + u16 reserved0; +#endif + u32 hdr_opaque3[4]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 reserved3; + u8 flags; +#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) +#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) +#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) +#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) +#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 + u8 reserved3; + u16 reserved4; +#endif + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u8 reserved5; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * firmware response - CQE: used only by firmware + */ +struct bnx2i_fw_response { + u32 hdr_dword1[2]; + u32 hdr_exp_cmd_sn; + u32 hdr_max_cmd_sn; + u32 hdr_ttt; + u32 hdr_res_cnt; + u32 cqe_flags; +#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0) +#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0 +#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8) +#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8 +#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16) +#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16 + u32 stat_sn; + u32 hdr_dword2[2]; + u32 hdr_dword3[2]; + u32 task_stat; + u32 reserved0; + u32 hdr_itt; + u32 cq_req_sn; +}; + + +/* + * iSCSI KCQ CQE parameters + */ +union iscsi_kcqe_params { + u32 reserved0[4]; +}; + +/* + * iSCSI KCQ CQE + */ +struct iscsi_kcqe { + u32 iscsi_conn_id; + u32 completion_status; + u32 iscsi_conn_context_id; + union iscsi_kcqe_params params; +#if defined(__BIG_ENDIAN) + u8 flags; +#define ISCSI_KCQE_RESERVED0 (0xF<<0) +#define ISCSI_KCQE_RESERVED0_SHIFT 0 +#define ISCSI_KCQE_LAYER_CODE (0x7<<4) +#define ISCSI_KCQE_LAYER_CODE_SHIFT 4 +#define ISCSI_KCQE_RESERVED1 (0x1<<7) +#define ISCSI_KCQE_RESERVED1_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define ISCSI_KCQE_RESERVED0 (0xF<<0) +#define ISCSI_KCQE_RESERVED0_SHIFT 0 +#define ISCSI_KCQE_LAYER_CODE (0x7<<4) +#define ISCSI_KCQE_LAYER_CODE_SHIFT 4 +#define ISCSI_KCQE_RESERVED1 (0x1<<7) +#define ISCSI_KCQE_RESERVED1_SHIFT 7 +#endif +}; + + + +/* + * iSCSI KWQE header + */ +struct iscsi_kwqe_header { +#if defined(__BIG_ENDIAN) + u8 flags; +#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) +#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 +#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 + u8 op_code; +#elif 
defined(__LITTLE_ENDIAN) + u8 op_code; + u8 flags; +#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) +#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 +#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) +#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 +#endif +}; + +/* + * iSCSI firmware init request 1 + */ +struct iscsi_kwqe_init1 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u8 reserved0; + u8 num_cqs; +#elif defined(__LITTLE_ENDIAN) + u8 num_cqs; + u8 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 dummy_buffer_addr_lo; + u32 dummy_buffer_addr_hi; +#if defined(__BIG_ENDIAN) + u16 num_ccells_per_conn; + u16 num_tasks_per_conn; +#elif defined(__LITTLE_ENDIAN) + u16 num_tasks_per_conn; + u16 num_ccells_per_conn; +#endif +#if defined(__BIG_ENDIAN) + u16 sq_wqes_per_page; + u16 sq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 sq_num_wqes; + u16 sq_wqes_per_page; +#endif +#if defined(__BIG_ENDIAN) + u8 cq_log_wqes_per_page; + u8 flags; +#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) +#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 + u16 cq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 cq_num_wqes; + u8 flags; +#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) +#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) +#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) +#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 +#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) +#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 + u8 cq_log_wqes_per_page; +#endif +#if defined(__BIG_ENDIAN) + u16 cq_num_pages; + u16 sq_num_pages; +#elif defined(__LITTLE_ENDIAN) + u16 sq_num_pages; + u16 cq_num_pages; +#endif +#if defined(__BIG_ENDIAN) + u16 rq_buffer_size; + u16 rq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 rq_num_wqes; + u16 rq_buffer_size; +#endif +}; + +/* + * iSCSI firmware init request 2 + */ +struct iscsi_kwqe_init2 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 max_cq_sqn; +#elif defined(__LITTLE_ENDIAN) + u16 max_cq_sqn; + struct iscsi_kwqe_header hdr; +#endif + u32 error_bit_map[2]; + u32 reserved1[5]; +}; + +/* + * Initial iSCSI connection offload request 1 + */ +struct iscsi_kwqe_conn_offload1 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 iscsi_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 iscsi_conn_id; + struct iscsi_kwqe_header hdr; +#endif + u32 sq_page_table_addr_lo; + u32 sq_page_table_addr_hi; + u32 cq_page_table_addr_lo; + u32 cq_page_table_addr_hi; + u32 reserved0[3]; +}; + +/* + * iSCSI Page Table Entry (PTE) + */ +struct iscsi_pte { + u32 hi; + u32 lo; +}; + +/* + * Initial iSCSI connection offload request 2 + */ +struct iscsi_kwqe_conn_offload2 { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 rq_page_table_addr_lo; + u32 rq_page_table_addr_hi; + struct iscsi_pte sq_first_pte; + struct iscsi_pte cq_first_pte; + u32 num_additional_wqes; +}; + + +/* + * Initial iSCSI connection offload request 3 + */ +struct iscsi_kwqe_conn_offload3 { +#if 
defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 reserved1; + struct iscsi_pte qp_first_pte[3]; +}; + + +/* + * iSCSI connection update request + */ +struct iscsi_kwqe_conn_update { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif +#if defined(__BIG_ENDIAN) + u8 session_error_recovery_level; + u8 max_outstanding_r2ts; + u8 reserved2; + u8 conn_flags; +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 +#elif defined(__LITTLE_ENDIAN) + u8 conn_flags; +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) +#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) +#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) +#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) +#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) +#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 + u8 reserved2; + u8 max_outstanding_r2ts; + u8 session_error_recovery_level; +#endif + u32 context_id; + u32 max_send_pdu_length; + u32 max_recv_pdu_length; + u32 first_burst_length; + u32 max_burst_length; + u32 exp_stat_sn; +}; + +/* + * iSCSI destroy connection request + */ +struct iscsi_kwqe_conn_destroy { +#if defined(__BIG_ENDIAN) + struct iscsi_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct iscsi_kwqe_header hdr; +#endif + u32 context_id; + u32 reserved1[6]; +}; + +/* + * iSCSI KWQ WQE + */ +union iscsi_kwqe { + struct iscsi_kwqe_init1 init1; + struct iscsi_kwqe_init2 init2; + struct iscsi_kwqe_conn_offload1 conn_offload1; + struct iscsi_kwqe_conn_offload2 conn_offload2; + struct iscsi_kwqe_conn_update conn_update; + struct iscsi_kwqe_conn_destroy conn_destroy; +}; + +/* + * iSCSI Login SQ WQE + */ +struct bnx2i_login_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) +#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 +#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 + u8 version_max; + u8 version_min; +#elif defined(__LITTLE_ENDIAN) + u8 version_min; + u8 version_max; + u8 op_attr; +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 +#define 
ISCSI_LOGIN_REQUEST_CONT (0x1<<6) +#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 +#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 isid_lo; +#if defined(__BIG_ENDIAN) + u16 isid_hi; + u16 tsih; +#elif defined(__LITTLE_ENDIAN) + u16 tsih; + u16 isid_hi; +#endif +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif +#if defined(__BIG_ENDIAN) + u16 cid; + u16 reserved3; +#elif defined(__LITTLE_ENDIAN) + u16 reserved3; + u16 cid; +#endif + u32 cmd_sn; + u32 exp_stat_sn; + u32 reserved4; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved8; + u8 reserved7; + u8 flags; +#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) +#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) +#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) +#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) +#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 +#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) +#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 + u8 reserved7; + u16 reserved8; +#endif + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved10; + u8 reserved9; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved9; + u8 reserved10; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Login CQE + */ +struct bnx2i_login_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) +#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 + u8 version_max; + u8 version_active; +#elif defined(__LITTLE_ENDIAN) + u8 version_active; + u8 version_max; + u8 response_flags; +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) +#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) +#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) +#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) +#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) +#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 + u8 op_code; +#endif + u32 
data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved1[2]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u8 err_code; + u8 reserved2; +#elif defined(__LITTLE_ENDIAN) + u8 reserved2; + u8 err_code; + u16 reserved3; +#endif + u32 stat_sn; + u32 isid_lo; +#if defined(__BIG_ENDIAN) + u16 isid_hi; + u16 tsih; +#elif defined(__LITTLE_ENDIAN) + u16 tsih; + u16 isid_hi; +#endif +#if defined(__BIG_ENDIAN) + u8 status_class; + u8 status_detail; + u16 reserved4; +#elif defined(__LITTLE_ENDIAN) + u16 reserved4; + u8 status_detail; + u8 status_class; +#endif + u32 reserved5[3]; +#if defined(__BIG_ENDIAN) + u16 reserved6; + u16 itt; +#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 + u16 reserved6; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI Logout SQ WQE + */ +struct bnx2i_logout_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) +#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) +#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 reserved1[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif +#if defined(__BIG_ENDIAN) + u16 cid; + u16 reserved3; +#elif defined(__LITTLE_ENDIAN) + u16 reserved3; + u16 cid; +#endif + u32 cmd_sn; + u32 reserved4[5]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved6; + u8 reserved5; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved5; + u8 reserved6; + u8 cq_index; +#endif +}; + + +/* + * iSCSI Logout CQE + */ +struct bnx2i_logout_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 response; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 response; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6[3]; +#if defined(__BIG_ENDIAN) + u16 time_to_wait; + u16 time_to_retain; +#elif defined(__LITTLE_ENDIAN) + u16 time_to_retain; + u16 time_to_wait; +#endif + u32 reserved7[3]; +#if defined(__BIG_ENDIAN) + u16 reserved8; + u16 itt; +#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 
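/*
 * A note on the pattern visible above and throughout these HSI structures:
 * every field group is declared twice, once under __BIG_ENDIAN and once
 * under __LITTLE_ENDIAN, with the sub-word fields listed in reverse order
 * in the little-endian branch. This keeps each field at the same byte
 * offset within the 32-bit words the chip consumes, whatever the host
 * endianness. The itt field also packs two values into 16 bits; a sketch
 * of how a driver could unpack it (illustrative, not code from this patch):
 *
 *	u16 index = itt & ISCSI_LOGOUT_RESPONSE_INDEX;
 *	u16 type  = (itt & ISCSI_LOGOUT_RESPONSE_TYPE) >>
 *			ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT;
 */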
+#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 + u16 reserved8; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI Nop-In CQE + */ +struct bnx2i_nop_in_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 reserved1; + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 ttt; + u32 reserved2; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5; + u32 lun[2]; + u32 reserved6[4]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) +#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 +#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) +#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) +#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 +#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) +#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + + +/* + * iSCSI NOP-OUT SQ WQE + */ +struct bnx2i_nop_out_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved2; + u16 itt; +#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) +#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 + u16 reserved2; +#endif + u32 ttt; + u32 cmd_sn; + u32 reserved3[2]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; +#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24 +#if defined(__BIG_ENDIAN) + u16 reserved7; + u8 reserved6; + u8 flags; +#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 +#elif defined(__LITTLE_ENDIAN) + u8 flags; +#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) +#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) +#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) +#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 + u8 reserved6; + u16 reserved7; +#endif + 
u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved9; + u8 reserved8; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved8; + u8 reserved9; + u8 cq_index; +#endif +}; + +/* + * iSCSI Reject CQE + */ +struct bnx2i_reject_msg { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 reason; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 reason; + u8 reserved1; + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved2[2]; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5[8]; + u32 cq_req_sn; +}; + +/* + * bnx2i iSCSI TMF SQ WQE + */ +struct bnx2i_tmf_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) +#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 +#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) +#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u16 itt; +#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TMF_REQUEST_TYPE (0x3<<14) +#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TMF_REQUEST_TYPE (0x3<<14) +#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 + u16 reserved1; +#endif + u32 ref_itt; + u32 cmd_sn; + u32 reserved2; + u32 ref_cmd_sn; + u32 reserved3[3]; + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved5; + u8 reserved4; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved4; + u8 reserved5; + u8 cq_index; +#endif +}; + +/* + * iSCSI Text SQ WQE + */ +struct bnx2i_text_request { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 op_attr; +#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_REQUEST_CONT (0x1<<6) +#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 +#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) +#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 op_attr; +#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_REQUEST_CONT (0x1<<6) +#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 +#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) +#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 lun[2]; +#if defined(__BIG_ENDIAN) + u16 reserved3; + u16 itt; +#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) +#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 +#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) +#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 + u16 reserved3; +#endif + u32 ttt; + u32 cmd_sn; + u32 reserved4[2]; + u32 resp_bd_list_addr_lo; + u32 resp_bd_list_addr_hi; + u32 resp_buffer; 
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) +#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 +#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24) +#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24 + u32 zero_fill; + u32 bd_list_addr_lo; + u32 bd_list_addr_hi; +#if defined(__BIG_ENDIAN) + u8 cq_index; + u8 reserved7; + u8 reserved6; + u8 num_bds; +#elif defined(__LITTLE_ENDIAN) + u8 num_bds; + u8 reserved6; + u8 reserved7; + u8 cq_index; +#endif +}; + +/* + * iSCSI SQ WQE + */ +union iscsi_request { + struct bnx2i_cmd_request cmd; + struct bnx2i_tmf_request tmf; + struct bnx2i_nop_out_request nop_out; + struct bnx2i_login_request login_req; + struct bnx2i_text_request text; + struct bnx2i_logout_request logout_req; + struct bnx2i_cleanup_request cleanup; +}; + + +/* + * iSCSI TMF CQE + */ +struct bnx2i_tmf_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 reserved1; + u8 response; + u8 reserved0; +#elif defined(__LITTLE_ENDIAN) + u8 reserved0; + u8 response; + u8 reserved1; + u8 op_code; +#endif + u32 reserved2; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 reserved3[2]; +#if defined(__BIG_ENDIAN) + u16 reserved5; + u8 err_code; + u8 reserved4; +#elif defined(__LITTLE_ENDIAN) + u8 reserved4; + u8 err_code; + u16 reserved5; +#endif + u32 reserved6[7]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + +/* + * iSCSI Text CQE + */ +struct bnx2i_text_response { +#if defined(__BIG_ENDIAN) + u8 op_code; + u8 response_flags; +#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) +#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) +#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + u8 response_flags; +#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) +#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) +#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) +#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 + u8 op_code; +#endif + u32 data_length; + u32 exp_cmd_sn; + u32 max_cmd_sn; + u32 ttt; + u32 reserved2; +#if defined(__BIG_ENDIAN) + u16 reserved4; + u8 err_code; + u8 reserved3; +#elif defined(__LITTLE_ENDIAN) + u8 reserved3; + u8 err_code; + u16 reserved4; +#endif + u32 reserved5; + u32 lun[2]; + u32 reserved6[4]; +#if defined(__BIG_ENDIAN) + u16 reserved7; + u16 itt; +#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 +#elif defined(__LITTLE_ENDIAN) + u16 itt; +#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) +#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) +#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 + u16 reserved7; +#endif + u32 cq_req_sn; +}; + +/* + * iSCSI CQE + */ +union iscsi_response { + struct bnx2i_cmd_response cmd; + struct bnx2i_tmf_response tmf; + struct bnx2i_login_response login_resp; + struct bnx2i_text_response text; + struct 
bnx2i_logout_response logout_resp;
+	struct bnx2i_cleanup_response cleanup;
+	struct bnx2i_reject_msg reject;
+	struct bnx2i_async_msg async;
+	struct bnx2i_nop_in_msg nop_in;
+};
+
+#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644
index 000000000000..820d428ae839
--- /dev/null
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -0,0 +1,7 @@
+config SCSI_BNX2_ISCSI
+	tristate "Broadcom NetXtreme II iSCSI support"
+	select SCSI_ISCSI_ATTRS
+	select CNIC
+	---help---
+	This driver supports iSCSI offload for the Broadcom NetXtreme II
+	devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644
index 000000000000..b5802bd2e76a
--- /dev/null
+++ b/drivers/scsi/bnx2i/Makefile
@@ -0,0 +1,3 @@
+bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
+
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644
index 000000000000..d7576f28c6e9
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -0,0 +1,771 @@
+/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#ifndef _BNX2I_H_
+#define _BNX2I_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/kfifo.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "../../net/cnic_if.h"
+#include "57xx_iscsi_hsi.h"
+#include "57xx_iscsi_constants.h"
+
+#define BNX2_ISCSI_DRIVER_NAME		"bnx2i"
+
+#define BNX2I_MAX_ADAPTERS		8
+
+#define ISCSI_MAX_CONNS_PER_HBA		128
+#define ISCSI_MAX_SESS_PER_HBA		ISCSI_MAX_CONNS_PER_HBA
+#define ISCSI_MAX_CMDS_PER_SESS		128
+
+/* Total active commands across all connections supported by devices */
+#define ISCSI_MAX_CMDS_PER_HBA_5708	(28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_5709	(128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_57710	(256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+
+#define ISCSI_MAX_BDS_PER_CMD		32
+
+#define MAX_PAGES_PER_CTRL_STRUCT_POOL	8
+#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS	4
+
+/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */
+#define MAX_BD_LENGTH			65535
+#define BD_SPLIT_SIZE			32768
+
+/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
+#define BNX2I_SQ_WQES_MIN		16
+#define BNX2I_570X_SQ_WQES_MAX		128
+#define BNX2I_5770X_SQ_WQES_MAX		512
+#define BNX2I_570X_SQ_WQES_DEFAULT	128
+#define BNX2I_5770X_SQ_WQES_DEFAULT	256
+
+#define BNX2I_570X_CQ_WQES_MAX		128
+#define BNX2I_5770X_CQ_WQES_MAX		512
+
+#define BNX2I_RQ_WQES_MIN		16
+#define BNX2I_RQ_WQES_MAX		32
+#define BNX2I_RQ_WQES_DEFAULT		16
+
+/* CCELLs per conn */
+#define BNX2I_CCELLS_MIN		16
+#define BNX2I_CCELLS_MAX		96
+#define BNX2I_CCELLS_DEFAULT		64
+
+#define ITT_INVALID_SIGNATURE		0xFFFF
+
+#define ISCSI_CMD_CLEANUP_TIMEOUT	100
+
+#define BNX2I_CONN_CTX_BUF_SIZE		16384
+
+#define BNX2I_SQ_WQE_SIZE		64
+#define BNX2I_RQ_WQE_SIZE		256
+#define BNX2I_CQE_SIZE			64
+
+#define MB_KERNEL_CTX_SHIFT		8
+#define MB_KERNEL_CTX_SIZE		(1 << MB_KERNEL_CTX_SHIFT)
+
+#define CTX_SHIFT			7
+#define GET_CID_NUM(cid_addr)		((cid_addr) >> CTX_SHIFT)
+
+#define CTX_OFFSET			0x10000
+#define MAX_CID_CNT			0x4000
+
+/* 5709 context registers */
+#define BNX2_MQ_CONFIG2			0x00003d00
+#define BNX2_MQ_CONFIG2_CONT_SZ		(0x7L<<4)
+#define BNX2_MQ_CONFIG2_FIRST_L4L5	(0x1fL<<8)
+
+/* 57710's BAR2 is mapped to doorbell registers */
+#define BNX2X_DOORBELL_PCI_BAR		2
+#define BNX2X_MAX_CQS			8
+
+#define CNIC_ARM_CQE			1
+#define CNIC_DISARM_CQE			0
+
+#define REG_RD(__hba, offset)		\
+		readl(__hba->regview + offset)
+#define REG_WR(__hba, offset, val)	\
+		writel(val, __hba->regview + offset)
+
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf:		driver buffer used to stage payload associated with
+ *			the login request
+ * @req_dma_addr:	dma address for iscsi login request payload buffer
+ * @req_buf_size:	actual login request payload length
+ * @req_wr_ptr:		pointer into login request buffer when next data is
+ *			to be written
+ * @resp_hdr:		iscsi header where iscsi login response header is to
+ *			be recreated
+ * @resp_buf:		buffer to stage login response payload
+ * @resp_dma_addr:	login response payload buffer dma address
+ * @resp_buf_size:	login response payload length
+ * @resp_wr_ptr:	pointer into login response buffer when next data is
+ *			to be written
+ * @req_bd_tbl:		iscsi login request payload BD table
+ * @req_bd_dma:		login request BD table dma address
+ * @resp_bd_tbl:	iscsi login response payload BD table
+ * @resp_bd_dma:	login response BD table dma address
+ *
+ * The following structure defines buffer info for generic pdus such as
+ * iSCSI Login, Logout and NOP
+ */
+struct generic_pdu_resc {
+	char *req_buf;
+	dma_addr_t req_dma_addr;
+	u32 req_buf_size;
+	char *req_wr_ptr;
+	struct iscsi_hdr resp_hdr;
+	char *resp_buf;
+	dma_addr_t resp_dma_addr;
+	u32 resp_buf_size;
+	char *resp_wr_ptr;
+	char *req_bd_tbl;
+	dma_addr_t req_bd_dma;
+	char *resp_bd_tbl;
+	dma_addr_t resp_bd_dma;
+};
+
+
+/**
+ * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
+ *
+ * @link:		list head to link elements
+ * @max_ptrs:		maximum pointers that can be stored in this page
+ * @num_valid:		number of pointers valid in this page
+ * @page:		base address for page pointer array
+ *
+ * structure to track DMA'able memory allocated for command BD tables
+ */
+struct bd_resc_page {
+	struct list_head link;
+	u32 max_ptrs;
+	u32 num_valid;
+	void *page[1];
+};
+
+
+/**
+ * struct io_bdt - I/O buffer descriptor table
+ *
+ * @bd_tbl:		BD table's virtual address
+ * @bd_tbl_dma:		BD table's dma address
+ * @bd_valid:		num valid BD entries
+ *
+ * IO BD table
+ */
+struct io_bdt {
+	struct iscsi_bd *bd_tbl;
+	dma_addr_t bd_tbl_dma;
+	u16 bd_valid;
+};
+
+
+/**
+ * bnx2i_cmd - iscsi command structure
+ *
+ * @scsi_cmd:		SCSI-ML task pointer corresponding to this iscsi cmd
+ * @sg:			SG list
+ * @io_tbl:		buffer descriptor (BD) table
+ * @bd_tbl_dma:		buffer descriptor (BD) table's dma address
+ */
+struct bnx2i_cmd {
+	struct iscsi_hdr hdr;
+	struct bnx2i_conn *conn;
+	struct scsi_cmnd *scsi_cmd;
+	struct scatterlist *sg;
+	struct io_bdt io_tbl;
+	dma_addr_t bd_tbl_dma;
+	struct bnx2i_cmd_request req;
+};
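The io_bdt hanging off each bnx2i_cmd points at a table of the struct iscsi_bd entries defined earlier in 57xx_iscsi_hsi.h. A hedged sketch of how such a table might be populated while honoring the MAX_BD_LENGTH/BD_SPLIT_SIZE limits from this header (an illustrative helper, not the driver's actual bnx2i_iscsi.c code):

/* Split one DMA region into BDs; 5706/5708 cannot take a full 64KB
 * buffer in a single BD, so oversized segments are cut at BD_SPLIT_SIZE. */
static int fill_bd_table(struct iscsi_bd *bd_tbl, dma_addr_t addr, u32 len)
{
	int bd_count = 0;

	if (!len)
		return 0;
	while (len) {
		u32 frag = (len > MAX_BD_LENGTH) ? BD_SPLIT_SIZE : len;

		bd_tbl[bd_count].buffer_addr_hi = (u32) ((u64) addr >> 32);
		bd_tbl[bd_count].buffer_addr_lo = (u32) addr;
		bd_tbl[bd_count].buffer_length = frag;
		bd_tbl[bd_count].flags = 0;
		addr += frag;
		len -= frag;
		bd_count++;
	}
	/* mark the chain boundaries so the firmware knows where the
	 * scatter list starts and ends */
	bd_tbl[0].flags |= ISCSI_BD_FIRST_IN_BD_CHAIN;
	bd_tbl[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
	return bd_count;
}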
+
+
+/**
+ * struct bnx2i_conn - iscsi connection structure
+ *
+ * @cls_conn:		pointer to iscsi cls conn
+ * @hba:		adapter structure pointer
+ * @iscsi_conn_cid:	iscsi conn id
+ * @fw_cid:		firmware iscsi context id
+ * @ep:			endpoint structure pointer
+ * @gen_pdu:		login/nopout/logout pdu resources
+ * @violation_notified:	bit mask used to track iscsi error/warning messages
+ *			already printed out
+ *
+ * iSCSI connection structure
+ */
+struct bnx2i_conn {
+	struct iscsi_cls_conn *cls_conn;
+	struct bnx2i_hba *hba;
+	struct completion cmd_cleanup_cmpl;
+	int is_bound;
+
+	u32 iscsi_conn_cid;
+#define BNX2I_CID_RESERVED	0x5AFF
+	u32 fw_cid;
+
+	struct timer_list poll_timer;
+	/*
+	 * Queue Pair (QP) related structure elements.
+	 */
+	struct bnx2i_endpoint *ep;
+
+	/*
+	 * Buffer for login negotiation process
+	 */
+	struct generic_pdu_resc gen_pdu;
+	u64 violation_notified;
+};
+
+
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base:	queue base memory
+ * @cid_que:		queue memory pointer
+ * @cid_q_prod_idx:	producer index
+ * @cid_q_cons_idx:	consumer index
+ * @cid_q_max_idx:	max index, used to detect wrap around condition
+ * @cid_free_cnt:	number of free CIDs in the queue
+ * @conn_cid_tbl:	iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+	void *cid_que_base;
+	u32 *cid_que;
+	u32 cid_q_prod_idx;
+	u32 cid_q_cons_idx;
+	u32 cid_q_max_idx;
+	u32 cid_free_cnt;
+	struct bnx2i_conn **conn_cid_tbl;
+};
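The iscsi_cid_queue above is a simple circular free-list of connection IDs. A sketch of the producer/consumer discipline its fields imply (illustrative helpers, not the driver's actual code; it assumes 0 is never handed out as a valid CID here):

static u32 cid_queue_alloc(struct iscsi_cid_queue *q)
{
	u32 cid;

	if (!q->cid_free_cnt)
		return 0;	/* queue exhausted */
	cid = q->cid_que[q->cid_q_cons_idx++];
	if (q->cid_q_cons_idx == q->cid_q_max_idx)
		q->cid_q_cons_idx = 0;	/* wrap around */
	q->cid_free_cnt--;
	return cid;
}

static void cid_queue_free(struct iscsi_cid_queue *q, u32 cid)
{
	q->cid_que[q->cid_q_prod_idx++] = cid;
	if (q->cid_q_prod_idx == q->cid_q_max_idx)
		q->cid_q_prod_idx = 0;	/* wrap around */
	q->cid_free_cnt++;
}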
/** + * struct bnx2i_hba - bnx2i adapter structure + * + * @link: list head to link elements + * @cnic: pointer to cnic device + * @pcidev: pointer to pci dev + * @netdev: pointer to netdev structure + * @regview: mapped PCI register space + * @age: age, incremented on every recovery + * @cnic_dev_type: cnic device type, 5706/5708/5709/57710 + * @mail_queue_access: mailbox queue access mode, applicable to 5709 only + * @reg_with_cnic: indicates whether the device is registered with CNIC + * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN + * @mtu_supported: Ethernet MTU supported + * @shost: scsi host pointer + * @max_sqes: SQ size + * @max_rqes: RQ size + * @max_cqes: CQ size + * @num_ccell: number of command cells per connection + * @ofld_conns_active: count of active offloaded connections + * @max_active_conns: max offload connections supported by this device + * @cid_que: iscsi cid queue + * @ep_rdwr_lock: read / write lock to synchronize various ep lists + * @ep_ofld_list: connection list for pending offload completion + * @ep_destroy_list: connection list for pending destroy completion + * @mp_bd_tbl: BD table to be used with middle path requests + * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer + * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs + * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer + * @lock: lock to synchronize access to hba structure + * @pci_did: PCI device ID + * @pci_vid: PCI vendor ID + * @pci_sdid: PCI subsystem device ID + * @pci_svid: PCI subsystem vendor ID + * @pci_func: PCI function number in system pci tree + * @pci_devno: PCI device number in system pci tree + * @num_wqe_sent: statistic counter, total wqe's sent + * @num_cqe_rcvd: statistic counter, total cqe's received + * @num_intr_claimed: statistic counter, total interrupts claimed + * @link_changed_count: statistic counter, num of link change notifications + * received + * @ipaddr_changed_count: statistic counter, num times IP address changed while + * at least one connection is offloaded + * @num_sess_opened: statistic counter, total num sessions opened + * @num_conn_opened: statistic counter, total num conns opened on this hba + * @ctx_ccell_tasks: captures number of ccells and tasks supported by + * currently offloaded connection, used to decode + * context memory + * + * Adapter Data Structure + */ +struct bnx2i_hba { + struct list_head link; + struct cnic_dev *cnic; + struct pci_dev *pcidev; + struct net_device *netdev; + void __iomem *regview; + + u32 age; + unsigned long cnic_dev_type; + #define BNX2I_NX2_DEV_5706 0x0 + #define BNX2I_NX2_DEV_5708 0x1 + #define BNX2I_NX2_DEV_5709 0x2 + #define BNX2I_NX2_DEV_57710 0x3 + u32 mail_queue_access; + #define BNX2I_MQ_KERNEL_MODE 0x0 + #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1 + #define BNX2I_MQ_BIN_MODE 0x2 + unsigned long reg_with_cnic; + #define BNX2I_CNIC_REGISTERED 1 + + unsigned long adapter_state; + #define ADAPTER_STATE_UP 0 + #define ADAPTER_STATE_GOING_DOWN 1 + #define ADAPTER_STATE_LINK_DOWN 2 + #define ADAPTER_STATE_INIT_FAILED 31 + unsigned int mtu_supported; + #define BNX2I_MAX_MTU_SUPPORTED 1500 + + struct Scsi_Host *shost; + + u32 max_sqes; + u32 max_rqes; + u32 max_cqes; + u32 num_ccell; + + int ofld_conns_active; + + int max_active_conns; + struct iscsi_cid_queue cid_que; + + rwlock_t ep_rdwr_lock; + struct list_head ep_ofld_list; + struct list_head ep_destroy_list; + + /* + * BD table to be used with MP (Middle Path) requests. + */ + char *mp_bd_tbl; + dma_addr_t mp_bd_dma; + char *dummy_buffer; + dma_addr_t dummy_buf_dma; + + spinlock_t lock; /* protects hba structure access */ + struct mutex net_dev_lock;/* sync net device access */ + + /* + * PCI related info. + */ + u16 pci_did; + u16 pci_vid; + u16 pci_sdid; + u16 pci_svid; + u16 pci_func; + u16 pci_devno; + + /* + * Statistics useful during development and, at a later stage, for + * score boarding. + */ + u32 num_wqe_sent; + u32 num_cqe_rcvd; + u32 num_intr_claimed; + u32 link_changed_count; + u32 ipaddr_changed_count; + u32 num_sess_opened; + u32 num_conn_opened; + unsigned int ctx_ccell_tasks; +}; +
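The BNX2I_NX2_DEV_* values above are bit positions within the unsigned long cnic_dev_type, set and tested with the kernel's set_bit()/test_bit(). A minimal userspace model of that pattern (test_bit_model() stands in for test_bit()):

/* Userspace model of the cnic_dev_type device-type flags. */
#include <stdio.h>

#define NX2_DEV_5709  0x2	/* bit positions, as in the defines above */
#define NX2_DEV_57710 0x3

static int test_bit_model(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

int main(void)
{
	unsigned long cnic_dev_type = 0;

	cnic_dev_type |= 1UL << NX2_DEV_57710;	/* set_bit() equivalent */

	if (test_bit_model(NX2_DEV_57710, &cnic_dev_type))
		printf("57710: use fw cid as-is, SQ doorbell lives in host memory\n");
	else
		printf("570x: cid = fw_cid >> 7, ring the doorbell register\n");
	return 0;
}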
/******************************************************************************* + * QP [ SQ / RQ / CQ ] info. + ******************************************************************************/ + +/* + * SQ/RQ/CQ generic structure definition + */ +struct sqe { + u8 sqe_byte[BNX2I_SQ_WQE_SIZE]; +}; + +struct rqe { + u8 rqe_byte[BNX2I_RQ_WQE_SIZE]; +}; + +struct cqe { + u8 cqe_byte[BNX2I_CQE_SIZE]; +}; + + +enum { +#if defined(__LITTLE_ENDIAN) + CNIC_EVENT_COAL_INDEX = 0x0, + CNIC_SEND_DOORBELL = 0x4, + CNIC_EVENT_CQ_ARM = 0x7, + CNIC_RECV_DOORBELL = 0x8 +#elif defined(__BIG_ENDIAN) + CNIC_EVENT_COAL_INDEX = 0x2, + CNIC_SEND_DOORBELL = 0x6, + CNIC_EVENT_CQ_ARM = 0x4, + CNIC_RECV_DOORBELL = 0xa +#endif +}; + + +/* + * CQ DB + */ +struct bnx2x_iscsi_cq_pend_cmpl { + /* CQ producer, updated by Ustorm */ + u16 ustrom_prod; + /* CQ pending completion counter */ + u16 pend_cntr; +}; + + +struct bnx2i_5771x_cq_db { + struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS]; + /* CQ pending completion ITT array */ + u16 itt[BNX2X_MAX_CQS]; + /* Cstorm CQ sequence to notify array, updated by driver */ + u16 sqn[BNX2X_MAX_CQS]; + u32 reserved[4]; /* 16 byte alignment */ +}; + + +struct bnx2i_5771x_sq_rq_db { + u16 prod_idx; + u8 reserved0[14]; /* Pad structure size to 16 bytes */ +}; + + +struct bnx2i_5771x_dbell_hdr { + u8 header; + /* 1 for rx doorbell, 0 for tx doorbell */ +#define B577XX_DOORBELL_HDR_RX (0x1<<0) +#define B577XX_DOORBELL_HDR_RX_SHIFT 0 + /* 0 for normal doorbell, 1 for advertise wnd doorbell */ +#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) +#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 + /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */ +#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) +#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 + /* connection type */ +#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +struct bnx2i_5771x_dbell { + struct bnx2i_5771x_dbell_hdr dbell; + u8 pad[3]; + +}; +
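A quick size check of the 57710 CQ doorbell layout above: eight 4-byte pending-completion pairs, two arrays of eight u16s, and 16 reserved bytes come to 80 bytes. A userspace model confirming the arithmetic, with standard C types standing in for u16/u32:

/* Size check of the bnx2i_5771x_cq_db layout. */
#include <stdio.h>
#include <stdint.h>

#define MAX_CQS 8	/* BNX2X_MAX_CQS */

struct cq_pend_cmpl_model { uint16_t ustrom_prod; uint16_t pend_cntr; };

struct cq_db_model {
	struct cq_pend_cmpl_model qp_pend_cmpl[MAX_CQS];
	uint16_t itt[MAX_CQS];
	uint16_t sqn[MAX_CQS];
	uint32_t reserved[4];	/* 16 byte alignment */
};

int main(void)
{
	printf("CQ DB size: %zu bytes\n", sizeof(struct cq_db_model)); /* 80 */
	return 0;
}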
/** + * struct qp_info - QP (shared queue region) attributes structure + * + * @ctx_base: ioremapped pci register base to access doorbell register + * pertaining to this offloaded connection + * @sq_virt: virtual address of send queue (SQ) region + * @sq_phys: DMA address of SQ memory region + * @sq_mem_size: SQ size + * @sq_prod_qe: SQ producer entry pointer + * @sq_cons_qe: SQ consumer entry pointer + * @sq_first_qe: virtual address of first entry in SQ + * @sq_last_qe: virtual address of last entry in SQ + * @sq_prod_idx: SQ producer index + * @sq_cons_idx: SQ consumer index + * @sqe_left: number of SQ entries left + * @sq_pgtbl_virt: page table describing buffer constituting SQ region + * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt' + * @sq_pgtbl_size: SQ page table size + * @cq_virt: virtual address of completion queue (CQ) region + * @cq_phys: DMA address of CQ memory region + * @cq_mem_size: CQ size + * @cq_prod_qe: CQ producer entry pointer + * @cq_cons_qe: CQ consumer entry pointer + * @cq_first_qe: virtual address of first entry in CQ + * @cq_last_qe: virtual address of last entry in CQ + * @cq_prod_idx: CQ producer index + * @cq_cons_idx: CQ consumer index + * @cqe_left: number of CQ entries left + * @cqe_size: size of each CQ entry + * @cqe_exp_seq_sn: next expected CQE sequence number + * @cq_pgtbl_virt: page table describing buffer constituting CQ region + * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt' + * @cq_pgtbl_size: CQ page table size + * @rq_virt: virtual address of receive queue (RQ) region + * @rq_phys: DMA address of RQ memory region + * @rq_mem_size: RQ size + * @rq_prod_qe: RQ producer entry pointer + * @rq_cons_qe: RQ consumer entry pointer + * @rq_first_qe: virtual address of first entry in RQ + * @rq_last_qe: virtual address of last entry in RQ + * @rq_prod_idx: RQ producer index + * @rq_cons_idx: RQ consumer index + * @rqe_left: number of RQ entries left + * @rq_pgtbl_virt: page table describing buffer constituting RQ region + * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt' + * @rq_pgtbl_size: RQ page table size + * + * queue pair (QP) is a per connection shared data structure which is used + * to send work requests (SQ), receive completion notifications (CQ) + * and receive asynchronous / scsi sense info (RQ). 'qp_info' structure + * below holds queue memory, consumer/producer indexes and page table + * information + */ +struct qp_info { + void __iomem *ctx_base; +#define DPM_TRIGER_TYPE 0x40 + +#define BNX2I_570x_QUE_DB_SIZE 0 +#define BNX2I_5771x_QUE_DB_SIZE 16 + struct sqe *sq_virt; + dma_addr_t sq_phys; + u32 sq_mem_size; + + struct sqe *sq_prod_qe; + struct sqe *sq_cons_qe; + struct sqe *sq_first_qe; + struct sqe *sq_last_qe; + u16 sq_prod_idx; + u16 sq_cons_idx; + u32 sqe_left; + + void *sq_pgtbl_virt; + dma_addr_t sq_pgtbl_phys; + u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ + + struct cqe *cq_virt; + dma_addr_t cq_phys; + u32 cq_mem_size; + + struct cqe *cq_prod_qe; + struct cqe *cq_cons_qe; + struct cqe *cq_first_qe; + struct cqe *cq_last_qe; + u16 cq_prod_idx; + u16 cq_cons_idx; + u32 cqe_left; + u32 cqe_size; + u32 cqe_exp_seq_sn; + + void *cq_pgtbl_virt; + dma_addr_t cq_pgtbl_phys; + u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ + + struct rqe *rq_virt; + dma_addr_t rq_phys; + u32 rq_mem_size; + + struct rqe *rq_prod_qe; + struct rqe *rq_cons_qe; + struct rqe *rq_first_qe; + struct rqe *rq_last_qe; + u16 rq_prod_idx; + u16 rq_cons_idx; + u32 rqe_left; + + void *rq_pgtbl_virt; + dma_addr_t rq_pgtbl_phys; + u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ +}; + + + +/* + * CID handles + */ +struct ep_handles { + u32 fw_cid; + u32 drv_iscsi_cid; + u16 pg_cid; + u16 rsvd; +}; + + +enum { + EP_STATE_IDLE = 0x0, + EP_STATE_PG_OFLD_START = 0x1, + EP_STATE_PG_OFLD_COMPL = 0x2, + EP_STATE_OFLD_START = 0x4, + EP_STATE_OFLD_COMPL = 0x8, + EP_STATE_CONNECT_START = 0x10, + EP_STATE_CONNECT_COMPL = 0x20, + EP_STATE_ULP_UPDATE_START = 0x40, + EP_STATE_ULP_UPDATE_COMPL = 0x80, + EP_STATE_DISCONN_START = 0x100, + EP_STATE_DISCONN_COMPL = 0x200, + EP_STATE_CLEANUP_START = 0x400, + EP_STATE_CLEANUP_CMPL = 0x800, + EP_STATE_TCP_FIN_RCVD = 0x1000, + EP_STATE_TCP_RST_RCVD = 0x2000, + EP_STATE_PG_OFLD_FAILED = 0x1000000, + EP_STATE_ULP_UPDATE_FAILED = 0x2000000, + EP_STATE_CLEANUP_FAILED = 0x4000000, + EP_STATE_OFLD_FAILED = 0x8000000, + EP_STATE_CONNECT_FAILED = 0x10000000, + EP_STATE_DISCONN_TIMEDOUT = 0x20000000, +}; + +/** + * struct bnx2i_endpoint - representation of tcp connection in NX2 world + * + * @link: list head to link elements + * @hba: adapter to which this connection belongs + * @conn: iscsi connection this EP is linked to + * @sess: iscsi session this EP is linked to + * @cm_sk: cnic sock struct + * @hba_age: age to detect if 'iscsid' issues ep_disconnect() + * after HBA reset is completed by bnx2i/cnic/bnx2 + * modules + * @state: tracks offload connection state machine + * @teardown_mode: indicates if conn teardown is abortive or orderly + * @qp: QP information + * @ids: contains chip allocated *context id* & driver assigned + * *iscsi cid* + * @ofld_timer: offload timer to detect timeout + * @ofld_wait: wait queue + * + * Endpoint Structure - equivalent of tcp
socket structure + */ +struct bnx2i_endpoint { + struct list_head link; + struct bnx2i_hba *hba; + struct bnx2i_conn *conn; + struct cnic_sock *cm_sk; + u32 hba_age; + u32 state; + unsigned long timestamp; + int num_active_cmds; + + struct qp_info qp; + struct ep_handles ids; + #define ep_iscsi_cid ids.drv_iscsi_cid + #define ep_cid ids.fw_cid + #define ep_pg_cid ids.pg_cid + struct timer_list ofld_timer; + wait_queue_head_t ofld_wait; +}; + + + +/* Global variables */ +extern unsigned int error_mask1, error_mask2; +extern u64 iscsi_error_mask; +extern unsigned int en_tcp_dack; +extern unsigned int event_coal_div; + +extern struct scsi_transport_template *bnx2i_scsi_xport_template; +extern struct iscsi_transport bnx2i_iscsi_transport; +extern struct cnic_ulp_ops bnx2i_cnic_cb; + +extern unsigned int sq_size; +extern unsigned int rq_size; + +extern struct device_attribute *bnx2i_dev_attributes[]; + + + +/* + * Function Prototypes + */ +extern void bnx2i_identify_device(struct bnx2i_hba *hba); +extern void bnx2i_register_device(struct bnx2i_hba *hba); + +extern void bnx2i_ulp_init(struct cnic_dev *dev); +extern void bnx2i_ulp_exit(struct cnic_dev *dev); +extern void bnx2i_start(void *handle); +extern void bnx2i_stop(void *handle); +extern void bnx2i_reg_dev_all(void); +extern void bnx2i_unreg_dev_all(void); +extern struct bnx2i_hba *get_adapter_list_head(void); + +struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, + u16 iscsi_cid); + +int bnx2i_alloc_ep_pool(void); +void bnx2i_release_ep_pool(void); +struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba); +struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba); + +struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic); + +struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic); +void bnx2i_free_hba(struct bnx2i_hba *hba); + +void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len); +void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count); + +void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd); + +void bnx2i_drop_session(struct iscsi_cls_session *session); + +extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba); +extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, + struct bnx2i_cmd *cmnd); +extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, + struct iscsi_task *mtask, u32 ttt, + char *datap, int data_len, int unsol); +extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, + struct iscsi_task *mtask); +extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, + struct bnx2i_cmd *cmd); +extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn); +extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); + +extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep); +extern void bnx2i_ep_ofld_timer(unsigned long data); +extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list( + struct bnx2i_hba *hba, u32 iscsi_cid); +extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list( + struct bnx2i_hba *hba, u32 iscsi_cid); + +extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep); +extern void bnx2i_arm_cq_event_coalescing(struct 
bnx2i_endpoint *ep, u8 action); + +/* Debug related function prototypes */ +extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); +extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); + +#endif diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c new file mode 100644 index 000000000000..906cef5cda86 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -0,0 +1,2405 @@ +/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ + +#include <scsi/scsi_tcq.h> +#include <scsi/libiscsi.h> +#include "bnx2i.h" + +/** + * bnx2i_get_cid_num - get cid from ep + * @ep: endpoint pointer + * + * Only applicable to 57710 family of devices + */ +static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep) +{ + u32 cid; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + cid = ep->ep_cid; + else + cid = GET_CID_NUM(ep->ep_cid); + return cid; +} + + +/** + * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for the device type + * @hba: Adapter for which adjustments are to be made + * + * Rounds each queue size so it occupies an integral number of pages; + * 5706/5708/5709 devices additionally require power-of-two queue sizes + */ +static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) +{ + u32 num_elements_per_pg; + + if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) || + test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) || + test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { + if (!is_power_of_2(hba->max_sqes)) + hba->max_sqes = rounddown_pow_of_two(hba->max_sqes); + + if (!is_power_of_2(hba->max_rqes)) + hba->max_rqes = rounddown_pow_of_two(hba->max_rqes); + } + + /* Adjust each queue size if the user selection does not + * yield integral num of page buffers + */ + /* adjust SQ */ + num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + if (hba->max_sqes < num_elements_per_pg) + hba->max_sqes = num_elements_per_pg; + else if (hba->max_sqes % num_elements_per_pg) + hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); + + /* adjust CQ */ + num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; + if (hba->max_cqes < num_elements_per_pg) + hba->max_cqes = num_elements_per_pg; + else if (hba->max_cqes % num_elements_per_pg) + hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); + + /* adjust RQ */ + num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; + if (hba->max_rqes < num_elements_per_pg) + hba->max_rqes = num_elements_per_pg; + else if (hba->max_rqes % num_elements_per_pg) + hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) & + ~(num_elements_per_pg - 1); +} +
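A worked example of the rounding above, assuming 4 KiB pages: 64-byte SQ WQEs give 64 entries per page, so a request for 100 SQEs rounds up to 128. A userspace sketch of the same round-to-multiple idiom:

/* Round a queue size up to a whole number of pages' worth of entries.
 * Assumes PAGE_SIZE 4096 and the 64 B SQ WQE size defined above. */
#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096, wqe_size = 64;
	unsigned int per_pg = page_size / wqe_size;	/* 64 entries/page */
	unsigned int max_sqes = 100;			/* user selection */

	if (max_sqes < per_pg)
		max_sqes = per_pg;
	else if (max_sqes % per_pg)
		max_sqes = (max_sqes + per_pg - 1) & ~(per_pg - 1);
	printf("adjusted SQ size: %u\n", max_sqes);	/* prints 128 */
	return 0;
}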
/** + * bnx2i_get_link_state - get network interface link state + * @hba: adapter instance pointer + * + * updates adapter structure flag based on netdev state + */ +static void bnx2i_get_link_state(struct bnx2i_hba *hba) +{ + if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); +} + + +/** + * bnx2i_iscsi_license_error - displays iscsi license related error message + * @hba: adapter instance pointer + * @error_code: error classification + * + * Puts out an error log when driver is unable to offload iscsi connection + * due to license restrictions + */ +static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) +{ + if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED) + /* iSCSI offload not supported on this device */ + printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n", + hba->netdev->name); + if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED) + /* iSCSI offload not supported on this LOM device */ + printk(KERN_ERR "bnx2i: LOM is not enabled to " + "offload iSCSI connections, dev=%s\n", + hba->netdev->name); + set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state); +} + + +/** + * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification + * @ep: endpoint (transport identifier) structure + * @action: action, ARM or DISARM. For now only ARM_CQE is used + * + * Arming the CQ enables the chip to generate global EQ events in order to + * interrupt the driver. An EQ event is generated when the CQ index is hit, + * or when at least one CQE is outstanding and the on-chip timer expires + */ +void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) +{ + struct bnx2i_5771x_cq_db *cq_db; + u16 cq_index; + + if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + return; + + if (action == CNIC_ARM_CQE) { + cq_index = ep->qp.cqe_exp_seq_sn + + ep->num_active_cmds / event_coal_div; + cq_index %= (ep->qp.cqe_size * 2 + 1); + if (!cq_index) { + cq_index = 1; + cq_db = (struct bnx2i_5771x_cq_db *) + ep->qp.cq_pgtbl_virt; + cq_db->sqn[0] = cq_index; + } + } +} +
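To see the arming arithmetic above with concrete numbers (illustrative values; event_coal_div is a driver tunable declared earlier in this header): the next notification index is the expected CQE sequence number advanced by a fraction of the outstanding commands, wrapped modulo the maximum CQ sequence number, 2 * CQ size + 1:

/* Illustrative model of the cq_index computation above. */
#include <stdio.h>

int main(void)
{
	unsigned int cqe_exp_seq_sn = 250;	/* next expected CQE seq number */
	unsigned int num_active_cmds = 12;
	unsigned int event_coal_div = 1;	/* assumed tunable value */
	unsigned int cq_size = 128;
	unsigned int max_cq_sqn = cq_size * 2 + 1;	/* 257 */
	unsigned int cq_index;

	cq_index = (cqe_exp_seq_sn + num_active_cmds / event_coal_div)
		   % max_cq_sqn;		/* 262 % 257 */
	if (!cq_index)
		cq_index = 1;			/* sequence 0 is never armed */
	printf("arm CQ at sequence number %u\n", cq_index);	/* prints 5 */
	return 0;
}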
/** + * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer + * @conn: iscsi connection on which RQ event occurred + * @ptr: driver buffer to which RQ buffer contents is to + * be copied + * @len: length of valid data inside RQ buf + * + * Copies RQ buffer contents from shared (DMA'able) memory region to + * driver buffer. RQ is used to DMA unsolicited iSCSI PDUs and + * SCSI sense info + */ +void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len) +{ + if (!bnx2i_conn->ep->qp.rqe_left) + return; + + bnx2i_conn->ep->qp.rqe_left--; + memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); + if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { + bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; + bnx2i_conn->ep->qp.rq_cons_idx = 0; + } else { + bnx2i_conn->ep->qp.rq_cons_qe++; + bnx2i_conn->ep->qp.rq_cons_idx++; + } +} + + +static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn) +{ + struct bnx2i_5771x_dbell dbell; + u32 msg; + + memset(&dbell, 0, sizeof(dbell)); + dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT); + msg = *((u32 *)&dbell); + /* TODO : get doorbell register mapping */ + writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); +} + + +/** + * bnx2i_put_rq_buf - Replenish RQ buffer and, if required, ring on-chip doorbell + * @conn: iscsi connection on which event to post + * @count: number of RQ buffers being posted to chip + * + * No need to ring hardware doorbell for 57710 family of devices + */ +void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count) +{ + struct bnx2i_5771x_sq_rq_db *rq_db; + u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + + ep->qp.rqe_left += count; + ep->qp.rq_prod_idx &= 0x7FFF; + ep->qp.rq_prod_idx += count; + + if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { + ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; + if (!hi_bit) + ep->qp.rq_prod_idx |= 0x8000; + } else + ep->qp.rq_prod_idx |= hi_bit; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; + rq_db->prod_idx = ep->qp.rq_prod_idx; + /* no need to ring hardware doorbell for 57710 */ + } else { + writew(ep->qp.rq_prod_idx, + ep->qp.ctx_base + CNIC_RECV_DOORBELL); + } + mmiowb(); +}
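The rq_prod_idx update above keeps a 15-bit entry count in the low bits and toggles bit 15 on every wrap, so the chip can distinguish a full ring from an empty one. A userspace model with an illustrative 2048-entry RQ:

/* Model of the phase-bit producer-index update in bnx2i_put_rq_buf(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t prod = 0x8000 | 2045;	/* near the end, phase bit set */
	unsigned int max_rqes = 2048, count = 5;
	uint16_t hi_bit = prod & 0x8000;

	prod &= 0x7FFF;
	prod += count;			/* 2050 */
	if (prod > max_rqes) {
		prod %= max_rqes;	/* 2 */
		if (!hi_bit)
			prod |= 0x8000;	/* toggle phase bit on wrap */
	} else {
		prod |= hi_bit;
	}
	printf("new rq_prod_idx = 0x%04x\n", prod);	/* prints 0x0002 */
	return 0;
}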
For 5706/5708/5709 new SQ WQE count is written into the + * doorbell register + */ +static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count) +{ + struct bnx2i_5771x_sq_rq_db *sq_db; + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + + ep->num_active_cmds++; + wmb(); /* flush SQ WQE memory before the doorbell is rung */ + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; + sq_db->prod_idx = ep->qp.sq_prod_idx; + bnx2i_ring_577xx_doorbell(bnx2i_conn); + } else + writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); + + mmiowb(); /* flush posted PCI writes */ +} + + +/** + * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters + * @conn: iscsi connection to which new SQ entries belong + * @count: number of SQ WQEs to post + * + * this routine will update SQ driver parameters and ring the doorbell + */ +static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn, + int count) +{ + int tmp_cnt; + + if (count == 1) { + if (bnx2i_conn->ep->qp.sq_prod_qe == + bnx2i_conn->ep->qp.sq_last_qe) + bnx2i_conn->ep->qp.sq_prod_qe = + bnx2i_conn->ep->qp.sq_first_qe; + else + bnx2i_conn->ep->qp.sq_prod_qe++; + } else { + if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= + bnx2i_conn->ep->qp.sq_last_qe) + bnx2i_conn->ep->qp.sq_prod_qe += count; + else { + tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - + bnx2i_conn->ep->qp.sq_prod_qe; + bnx2i_conn->ep->qp.sq_prod_qe = + &bnx2i_conn->ep->qp.sq_first_qe[count - + (tmp_cnt + 1)]; + } + } + bnx2i_conn->ep->qp.sq_prod_idx += count; + /* Ring the doorbell */ + bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); +} + + +/** + * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware + * @conn: iscsi connection + * @cmd: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepare and post an iSCSI Login request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task) +{ + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_login_request *login_wqe; + struct iscsi_login *login_hdr; + u32 dword; + + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; + login_hdr = (struct iscsi_login *)task->hdr; + login_wqe = (struct bnx2i_login_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + + login_wqe->op_code = login_hdr->opcode; + login_wqe->op_attr = login_hdr->flags; + login_wqe->version_max = login_hdr->max_version; + login_wqe->version_min = login_hdr->min_version; + login_wqe->data_length = ntoh24(login_hdr->dlength); + login_wqe->isid_lo = *((u32 *) login_hdr->isid); + login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2); + login_wqe->tsih = login_hdr->tsih; + login_wqe->itt = task->itt | + (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT); + login_wqe->cid = login_hdr->cid; + + login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn); + login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); + + login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; + login_wqe->resp_bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); + + dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) | + (bnx2i_conn->gen_pdu.resp_buf_size << + ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); + login_wqe->resp_buffer = dword; + login_wqe->flags = 0; + login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; + login_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); + login_wqe->num_bds = 1; + 
/** + * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware + * @conn: iscsi connection + * @cmd: driver command structure which is requesting + * a WQE to be sent to chip for further processing + * + * prepare and post an iSCSI Login request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task) +{ + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_login_request *login_wqe; + struct iscsi_login *login_hdr; + u32 dword; + + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; + login_hdr = (struct iscsi_login *)task->hdr; + login_wqe = (struct bnx2i_login_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + + login_wqe->op_code = login_hdr->opcode; + login_wqe->op_attr = login_hdr->flags; + login_wqe->version_max = login_hdr->max_version; + login_wqe->version_min = login_hdr->min_version; + login_wqe->data_length = ntoh24(login_hdr->dlength); + login_wqe->isid_lo = *((u32 *) login_hdr->isid); + login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2); + login_wqe->tsih = login_hdr->tsih; + login_wqe->itt = task->itt | + (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT); + login_wqe->cid = login_hdr->cid; + + login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn); + login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); + + login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; + login_wqe->resp_bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); + + dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) | + (bnx2i_conn->gen_pdu.resp_buf_size << + ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); + login_wqe->resp_buffer = dword; + login_wqe->flags = 0; + login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; + login_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); + login_wqe->num_bds = 1; + login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware + * @conn: iscsi connection + * @mtask: driver command structure which is requesting + * a WQE to be sent to chip for further processing + * + * prepare and post an iSCSI task management request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *mtask) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_tm *tmfabort_hdr; + struct scsi_cmnd *ref_sc; + struct iscsi_task *ctask; + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_tmf_request *tmfabort_wqe; + u32 dword; + + bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; + tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; + tmfabort_wqe = (struct bnx2i_tmf_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + + tmfabort_wqe->op_code = tmfabort_hdr->opcode; + tmfabort_wqe->op_attr = + ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; + tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]); + tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]); + + tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); + tmfabort_wqe->reserved2 = 0; + tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); + + ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); + if (!ctask || !ctask->sc) + /* + * the iscsi layer must have completed the cmd while this + * was starting up. + */ + return 0; + ref_sc = ctask->sc; + + if (ref_sc->sc_data_direction == DMA_TO_DEVICE) + dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); + else + dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); + tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt); + tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); + + tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; + tmfabort_wqe->bd_list_addr_hi = (u32) + ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + tmfabort_wqe->num_bds = 1; + tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware + * @conn: iscsi connection + * @cmd: driver command structure which is requesting + * a WQE to be sent to chip for further processing + * + * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, + struct bnx2i_cmd *cmd) +{ + struct bnx2i_cmd_request *scsi_cmd_wqe; + + scsi_cmd_wqe = (struct bnx2i_cmd_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request)); + scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + +/** + * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware + * @conn: iscsi connection + * @cmd: driver command structure which is requesting + * a WQE to be sent to chip for further processing + * @ttt: TTT to be used when building pdu header + * @datap: payload buffer pointer + * @data_len: payload data length + * @unsol: indicates whether nopout pdu is unsolicited pdu or + * in response to target's NOPIN w/ TTT != FFFFFFFF + * + * prepare and post a nopout request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task, u32 ttt, + char
*datap, int data_len, int unsol) +{ + struct bnx2i_endpoint *ep = bnx2i_conn->ep; + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_nop_out_request *nopout_wqe; + struct iscsi_nopout *nopout_hdr; + + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; + nopout_hdr = (struct iscsi_nopout *)task->hdr; + nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; + nopout_wqe->op_code = nopout_hdr->opcode; + nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; + memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + u32 tmp = nopout_hdr->lun[0]; + /* 57710 requires LUN field to be swapped */ + nopout_hdr->lun[0] = nopout_hdr->lun[1]; + nopout_hdr->lun[1] = tmp; + } + + nopout_wqe->itt = ((u16)task->itt | + (ISCSI_TASK_TYPE_MPATH << + ISCSI_TMF_REQUEST_TYPE_SHIFT)); + nopout_wqe->ttt = ttt; + nopout_wqe->flags = 0; + if (!unsol) + nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; + else if (nopout_hdr->itt == RESERVED_ITT) + nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; + + nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); + nopout_wqe->data_length = data_len; + if (data_len) { + /* handle payload data, not required in first release */ + printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n"); + } else { + nopout_wqe->bd_list_addr_lo = (u32) + bnx2i_conn->hba->mp_bd_dma; + nopout_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + nopout_wqe->num_bds = 1; + } + nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + +/** + * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware + * @conn: iscsi connection + * @cmd: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepare and post logout request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *task) +{ + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_logout_request *logout_wqe; + struct iscsi_logout *logout_hdr; + + bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; + logout_hdr = (struct iscsi_logout *)task->hdr; + + logout_wqe = (struct bnx2i_logout_request *) + bnx2i_conn->ep->qp.sq_prod_qe; + memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request)); + + logout_wqe->op_code = logout_hdr->opcode; + logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn); + logout_wqe->op_attr = + logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE; + logout_wqe->itt = ((u16)task->itt | + (ISCSI_TASK_TYPE_MPATH << + ISCSI_LOGOUT_REQUEST_TYPE_SHIFT)); + logout_wqe->data_length = 0; + logout_wqe->cid = 0; + + logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; + logout_wqe->bd_list_addr_hi = (u32) + ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); + logout_wqe->num_bds = 1; + logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + +/** + * bnx2i_update_iscsi_conn - post iSCSI logout request WQE to hardware + * @conn: iscsi connection which requires iscsi parameter update + * + * sends down iSCSI Conn Update request to move iSCSI conn to FFP + */ +void bnx2i_update_iscsi_conn(struct iscsi_conn *conn) +{ + struct bnx2i_conn *bnx2i_conn = conn->dd_data; + struct bnx2i_hba *hba = bnx2i_conn->hba; + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_update *update_wqe; + struct iscsi_kwqe_conn_update conn_update_kwqe; + + update_wqe = &conn_update_kwqe; + + update_wqe->hdr.op_code = 
ISCSI_KWQE_OPCODE_UPDATE_CONN; + update_wqe->hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + /* 5771x requires conn context id to be passed as is */ + if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type)) + update_wqe->context_id = bnx2i_conn->ep->ep_cid; + else + update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7); + update_wqe->conn_flags = 0; + if (conn->hdrdgst_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST; + if (conn->datadgst_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST; + if (conn->session->initial_r2t_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T; + if (conn->session->imm_data_en) + update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA; + + update_wqe->max_send_pdu_length = conn->max_xmit_dlength; + update_wqe->max_recv_pdu_length = conn->max_recv_dlength; + update_wqe->first_burst_length = conn->session->first_burst; + update_wqe->max_burst_length = conn->session->max_burst; + update_wqe->exp_stat_sn = conn->exp_statsn; + update_wqe->max_outstanding_r2ts = conn->session->max_r2t; + update_wqe->session_error_recovery_level = conn->session->erl; + iscsi_conn_printk(KERN_ALERT, conn, + "bnx2i: conn update - MBL 0x%x FBL 0x%x" + "MRDSL_I 0x%x MRDSL_T 0x%x \n", + update_wqe->max_burst_length, + update_wqe->first_burst_length, + update_wqe->max_recv_pdu_length, + update_wqe->max_send_pdu_length); + + kwqe_arr[0] = (struct kwqe *) update_wqe; + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); +} + + +/** + * bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware + * @data: endpoint (transport handle) structure pointer + * + * routine to handle connection offload/destroy request timeout + */ +void bnx2i_ep_ofld_timer(unsigned long data) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data; + + if (ep->state == EP_STATE_OFLD_START) { + printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n"); + ep->state = EP_STATE_OFLD_FAILED; + } else if (ep->state == EP_STATE_DISCONN_START) { + printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n"); + ep->state = EP_STATE_DISCONN_TIMEDOUT; + } else if (ep->state == EP_STATE_CLEANUP_START) { + printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n"); + ep->state = EP_STATE_CLEANUP_FAILED; + } + + wake_up_interruptible(&ep->ofld_wait); +} + + +static int bnx2i_power_of2(u32 val) +{ + u32 power = 0; + if (val & (val - 1)) + return power; + val--; + while (val) { + val = val >> 1; + power++; + } + return power; +} + + +/** + * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request + * @hba: adapter structure pointer + * @cmd: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) +{ + struct bnx2i_cleanup_request *cmd_cleanup; + + cmd_cleanup = + (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; + memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request)); + + cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST; + cmd_cleanup->itt = cmd->req.itt; + cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(cmd->conn, 1); +} + + +/** + * bnx2i_send_conn_destroy - initiates iscsi connection teardown process + * @hba: adapter structure pointer + * @ep: endpoint (transport indentifier) structure + * + * this routine prepares and 
posts CONN_OFLD_REQ1/2 KWQE to initiate + * iscsi connection context clean-up process + */ +void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_destroy conn_cleanup; + + memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); + + conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN; + conn_cleanup.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + /* 5771x requires conn context id to be passed as is */ + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + conn_cleanup.context_id = ep->ep_cid; + else + conn_cleanup.context_id = (ep->ep_cid >> 7); + + conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid; + + kwqe_arr[0] = (struct kwqe *) &conn_cleanup; + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); +} + + +/** + * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process + * @hba: adapter structure pointer + * @ep: endpoint (transport indentifier) structure + * + * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[2]; + struct iscsi_kwqe_conn_offload1 ofld_req1; + struct iscsi_kwqe_conn_offload2 ofld_req2; + dma_addr_t dma_addr; + int num_kwqes = 2; + u32 *ptbl; + + ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; + + dma_addr = ep->qp.sq_pgtbl_phys; + ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + dma_addr = ep->qp.cq_pgtbl_phys; + ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + dma_addr = ep->qp.rq_pgtbl_phys; + ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; + ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ptbl = (u32 *) ep->qp.sq_pgtbl_virt; + + ofld_req2.sq_first_pte.hi = *ptbl++; + ofld_req2.sq_first_pte.lo = *ptbl; + + ptbl = (u32 *) ep->qp.cq_pgtbl_virt; + ofld_req2.cq_first_pte.hi = *ptbl++; + ofld_req2.cq_first_pte.lo = *ptbl; + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + ofld_req2.num_additional_wqes = 0; + + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); +} + + +/** + * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation + * @hba: adapter structure pointer + * @ep: endpoint (transport indentifier) structure + * + * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, + struct bnx2i_endpoint *ep) +{ + struct kwqe *kwqe_arr[5]; + struct iscsi_kwqe_conn_offload1 ofld_req1; + struct iscsi_kwqe_conn_offload2 ofld_req2; + struct iscsi_kwqe_conn_offload3 ofld_req3[1]; + dma_addr_t dma_addr; + int num_kwqes = 2; + u32 *ptbl; + + ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; + + dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE; + 
ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE; + ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; + ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE; + ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; + ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); + + ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); + ofld_req2.sq_first_pte.hi = *ptbl++; + ofld_req2.sq_first_pte.lo = *ptbl; + + ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); + ofld_req2.cq_first_pte.hi = *ptbl++; + ofld_req2.cq_first_pte.lo = *ptbl; + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + + ofld_req2.num_additional_wqes = 1; + memset(ofld_req3, 0x00, sizeof(ofld_req3[0])); + ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); + ofld_req3[0].qp_first_pte[0].hi = *ptbl++; + ofld_req3[0].qp_first_pte[0].lo = *ptbl; + + kwqe_arr[2] = (struct kwqe *) ofld_req3; + /* need if we decide to go with multiple KCQE's per conn */ + num_kwqes += 1; + + if (hba->cnic && hba->cnic->submit_kwqes) + hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); +} + +/** + * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process + * + * @hba: adapter structure pointer + * @ep: endpoint (transport indentifier) structure + * + * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE + */ +void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) + bnx2i_5771x_send_conn_ofld_req(hba, ep); + else + bnx2i_570x_send_conn_ofld_req(hba, ep); +} + + +/** + * setup_qp_page_tables - iscsi QP page table setup function + * @ep: endpoint (transport indentifier) structure + * + * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires + * 64-bit address in big endian format. 
Whereas 10G/sec (57710) requires + * PT in little endian format + */ +static void setup_qp_page_tables(struct bnx2i_endpoint *ep) +{ + int num_pages; + u32 *ptbl; + dma_addr_t page; + int cnic_dev_10g; + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) + cnic_dev_10g = 1; + else + cnic_dev_10g = 0; + + /* SQ page table */ + memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); + num_pages = ep->qp.sq_mem_size / PAGE_SIZE; + page = ep->qp.sq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.sq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += PAGE_SIZE; + } + } + + /* RQ page table */ + memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); + num_pages = ep->qp.rq_mem_size / PAGE_SIZE; + page = ep->qp.rq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.rq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += PAGE_SIZE; + } + } + + /* CQ page table */ + memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); + num_pages = ep->qp.cq_mem_size / PAGE_SIZE; + page = ep->qp.cq_phys; + + if (cnic_dev_10g) + ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); + else + ptbl = (u32 *) ep->qp.cq_pgtbl_virt; + while (num_pages--) { + if (cnic_dev_10g) { + /* PTE is written in little endian format for 57710 */ + *ptbl = (u32) page; + ptbl++; + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + page += PAGE_SIZE; + } else { + /* PTE is written in big endian format for + * 5706/5708/5709 devices */ + *ptbl = (u32) ((u64) page >> 32); + ptbl++; + *ptbl = (u32) page; + ptbl++; + page += PAGE_SIZE; + } + } +} + + +/** + * bnx2i_alloc_qp_resc - allocates required resources for QP. + * @hba: adapter structure pointer + * @ep: endpoint (transport indentifier) structure + * + * Allocate QP (transport layer for iSCSI connection) resources, DMA'able + * memory for SQ/RQ/CQ and page tables. 
EP structure elements such + * as producer/consumer indexes/pointers, queue sizes and page table + * contents are setup + */ +int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + struct bnx2i_5771x_cq_db *cq_db; + + ep->hba = hba; + ep->conn = NULL; + ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0; + + /* Allocate page table memory for SQ which is page aligned */ + ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; + ep->qp.sq_mem_size = + (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + ep->qp.sq_pgtbl_size = + (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); + ep->qp.sq_pgtbl_size = + (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + ep->qp.sq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, + &ep->qp.sq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.sq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n", + ep->qp.sq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual SQ element */ + ep->qp.sq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, + &ep->qp.sq_phys, GFP_KERNEL); + if (!ep->qp.sq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", + ep->qp.sq_mem_size); + goto mem_alloc_err; + } + + memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size); + ep->qp.sq_first_qe = ep->qp.sq_virt; + ep->qp.sq_prod_qe = ep->qp.sq_first_qe; + ep->qp.sq_cons_qe = ep->qp.sq_first_qe; + ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; + ep->qp.sq_prod_idx = 0; + ep->qp.sq_cons_idx = 0; + ep->qp.sqe_left = hba->max_sqes; + + /* Allocate page table memory for CQ which is page aligned */ + ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; + ep->qp.cq_mem_size = + (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + ep->qp.cq_pgtbl_size = + (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); + ep->qp.cq_pgtbl_size = + (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + ep->qp.cq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, + &ep->qp.cq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.cq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n", + ep->qp.cq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual CQ element */ + ep->qp.cq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, + &ep->qp.cq_phys, GFP_KERNEL); + if (!ep->qp.cq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", + ep->qp.cq_mem_size); + goto mem_alloc_err; + } + memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size); + + ep->qp.cq_first_qe = ep->qp.cq_virt; + ep->qp.cq_prod_qe = ep->qp.cq_first_qe; + ep->qp.cq_cons_qe = ep->qp.cq_first_qe; + ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; + ep->qp.cq_prod_idx = 0; + ep->qp.cq_cons_idx = 0; + ep->qp.cqe_left = hba->max_cqes; + ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN; + ep->qp.cqe_size = hba->max_cqes; + + /* Invalidate all EQ CQE index, req only for 57710 */ + cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; + memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS); + + /* Allocate page table memory for RQ which is page aligned */ + ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; + ep->qp.rq_mem_size = + (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + ep->qp.rq_pgtbl_size = + (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); + ep->qp.rq_pgtbl_size = + (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + ep->qp.rq_pgtbl_virt = + dma_alloc_coherent(&hba->pcidev->dev, 
ep->qp.rq_pgtbl_size, + &ep->qp.rq_pgtbl_phys, GFP_KERNEL); + if (!ep->qp.rq_pgtbl_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n", + ep->qp.rq_pgtbl_size); + goto mem_alloc_err; + } + + /* Allocate memory area for actual RQ element */ + ep->qp.rq_virt = + dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, + &ep->qp.rq_phys, GFP_KERNEL); + if (!ep->qp.rq_virt) { + printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n", + ep->qp.rq_mem_size); + goto mem_alloc_err; + } + + ep->qp.rq_first_qe = ep->qp.rq_virt; + ep->qp.rq_prod_qe = ep->qp.rq_first_qe; + ep->qp.rq_cons_qe = ep->qp.rq_first_qe; + ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1]; + ep->qp.rq_prod_idx = 0x8000; + ep->qp.rq_cons_idx = 0; + ep->qp.rqe_left = hba->max_rqes; + + setup_qp_page_tables(ep); + + return 0; + +mem_alloc_err: + bnx2i_free_qp_resc(hba, ep); + return -ENOMEM; +} + + + +/** + * bnx2i_free_qp_resc - free memory resources held by QP + * @hba: adapter structure pointer + * @ep: endpoint (transport indentifier) structure + * + * Free QP resources - SQ/RQ/CQ memory and page tables. + */ +void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) +{ + if (ep->qp.ctx_base) { + iounmap(ep->qp.ctx_base); + ep->qp.ctx_base = NULL; + } + /* Free SQ mem */ + if (ep->qp.sq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, + ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys); + ep->qp.sq_pgtbl_virt = NULL; + ep->qp.sq_pgtbl_phys = 0; + } + if (ep->qp.sq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, + ep->qp.sq_virt, ep->qp.sq_phys); + ep->qp.sq_virt = NULL; + ep->qp.sq_phys = 0; + } + + /* Free RQ mem */ + if (ep->qp.rq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, + ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys); + ep->qp.rq_pgtbl_virt = NULL; + ep->qp.rq_pgtbl_phys = 0; + } + if (ep->qp.rq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size, + ep->qp.rq_virt, ep->qp.rq_phys); + ep->qp.rq_virt = NULL; + ep->qp.rq_phys = 0; + } + + /* Free CQ mem */ + if (ep->qp.cq_pgtbl_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, + ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys); + ep->qp.cq_pgtbl_virt = NULL; + ep->qp.cq_pgtbl_phys = 0; + } + if (ep->qp.cq_virt) { + dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, + ep->qp.cq_virt, ep->qp.cq_phys); + ep->qp.cq_virt = NULL; + ep->qp.cq_phys = 0; + } +} + + +/** + * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w + * @hba: adapter structure pointer + * + * Send down iscsi_init KWQEs which initiates the initial handshake with the f/w + * This results in iSCSi support validation and on-chip context manager + * initialization. Firmware completes this handshake with a CQE carrying + * the result of iscsi support validation. 
Parameter carried by + * iscsi init request determines the number of offloaded connection and + * tolerance level for iscsi protocol violation this hba/chip can support + */ +int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) +{ + struct kwqe *kwqe_arr[3]; + struct iscsi_kwqe_init1 iscsi_init; + struct iscsi_kwqe_init2 iscsi_init2; + int rc = 0; + u64 mask64; + + bnx2i_adjust_qp_size(hba); + + iscsi_init.flags = + ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; + if (en_tcp_dack) + iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; + iscsi_init.reserved0 = 0; + iscsi_init.num_cqs = 1; + iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1; + iscsi_init.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + + iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; + iscsi_init.dummy_buffer_addr_hi = + (u32) ((u64) hba->dummy_buf_dma >> 32); + + hba->ctx_ccell_tasks = + ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); + iscsi_init.num_ccells_per_conn = hba->num_ccell; + iscsi_init.num_tasks_per_conn = hba->max_sqes; + iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; + iscsi_init.sq_num_wqes = hba->max_sqes; + iscsi_init.cq_log_wqes_per_page = + (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); + iscsi_init.cq_num_wqes = hba->max_cqes; + iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + + (PAGE_SIZE - 1)) / PAGE_SIZE; + iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + + (PAGE_SIZE - 1)) / PAGE_SIZE; + iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; + iscsi_init.rq_num_wqes = hba->max_rqes; + + + iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2; + iscsi_init2.hdr.flags = + (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); + iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1; + mask64 = 0x0ULL; + mask64 |= ( + /* CISCO MDS */ + (1UL << + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) | + /* HP MSA1510i */ + (1UL << + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) | + /* EMC */ + (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN)); + if (error_mask1) + iscsi_init2.error_bit_map[0] = error_mask1; + else + iscsi_init2.error_bit_map[0] = (u32) mask64; + + if (error_mask2) + iscsi_init2.error_bit_map[1] = error_mask2; + else + iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32); + + iscsi_error_mask = mask64; + + kwqe_arr[0] = (struct kwqe *) &iscsi_init; + kwqe_arr[1] = (struct kwqe *) &iscsi_init2; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2); + return rc; +} + + +/** + * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion. 
+ * @conn: iscsi connection + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process SCSI CMD Response CQE & complete the request to SCSI-ML + */ +static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_cmd_response *resp_cqe; + struct bnx2i_cmd *bnx2i_cmd; + struct iscsi_task *task; + struct iscsi_cmd_rsp *hdr; + u32 datalen = 0; + + resp_cqe = (struct bnx2i_cmd_response *)cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX); + if (!task) + goto fail; + + bnx2i_cmd = task->dd_data; + + if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) { + conn->datain_pdus_cnt += + resp_cqe->task_stat.read_stat.num_data_outs; + conn->rxdata_octets += + bnx2i_cmd->req.total_data_transfer_length; + } else { + conn->dataout_pdus_cnt += + resp_cqe->task_stat.read_stat.num_data_outs; + conn->r2t_pdus_cnt += + resp_cqe->task_stat.read_stat.num_r2ts; + conn->txdata_octets += + bnx2i_cmd->req.total_data_transfer_length; + } + bnx2i_iscsi_unmap_sg_list(bnx2i_cmd); + + hdr = (struct iscsi_cmd_rsp *)task->hdr; + resp_cqe = (struct bnx2i_cmd_response *)cqe; + hdr->opcode = resp_cqe->op_code; + hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn); + hdr->response = resp_cqe->response; + hdr->cmd_status = resp_cqe->status; + hdr->flags = resp_cqe->response_flags; + hdr->residual_count = cpu_to_be32(resp_cqe->residual_count); + + if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN) + goto done; + + if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) { + datalen = resp_cqe->data_length; + if (datalen < 2) + goto done; + + if (datalen > BNX2I_RQ_WQE_SIZE) { + iscsi_conn_printk(KERN_ERR, conn, + "sense data len %d > RQ sz\n", + datalen); + datalen = BNX2I_RQ_WQE_SIZE; + } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) { + iscsi_conn_printk(KERN_ERR, conn, + "sense data len %d > conn data\n", + datalen); + datalen = ISCSI_DEF_MAX_RECV_SEG_LEN; + } + + bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen); + bnx2i_put_rq_buf(bnx2i_cmd->conn, 1); + } + +done: + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, + conn->data, datalen); +fail: + spin_unlock(&session->lock); + return 0; +} + + +/** + * bnx2i_process_login_resp - this function handles iscsi login response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process Login Response CQE & complete it to open-iscsi user daemon + */ +static int bnx2i_process_login_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_login_response *login; + struct iscsi_login_rsp *resp_hdr; + int pld_len; + int pad_len; + + login = (struct bnx2i_login_response *) cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + login->itt & ISCSI_LOGIN_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = login->op_code; + resp_hdr->flags = login->response_flags; + resp_hdr->max_version = login->version_max; + resp_hdr->active_version = login->version_active;; + resp_hdr->hlength = 0; + + hton24(resp_hdr->dlength, login->data_length); + memcpy(resp_hdr->isid, 
&login->isid_lo, 6); + resp_hdr->tsih = cpu_to_be16(login->tsih); + resp_hdr->itt = task->hdr->itt; + resp_hdr->statsn = cpu_to_be32(login->stat_sn); + resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn); + resp_hdr->status_class = login->status_class; + resp_hdr->status_detail = login->status_detail; + pld_len = login->data_length; + bnx2i_conn->gen_pdu.resp_wr_ptr = + bnx2i_conn->gen_pdu.resp_buf + pld_len; + + pad_len = 0; + if (pld_len & 0x3) + pad_len = 4 - (pld_len % 4); + + if (pad_len) { + int i = 0; + for (i = 0; i < pad_len; i++) { + bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; + bnx2i_conn->gen_pdu.resp_wr_ptr++; + } + } + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); +done: + spin_unlock(&session->lock); + return 0; +} + +/** + * bnx2i_process_tmf_resp - this function handles iscsi TMF response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI TMF Response CQE and wake up the driver eh thread. + */ +static int bnx2i_process_tmf_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_tmf_response *tmf_cqe; + struct iscsi_tm_rsp *resp_hdr; + + tmf_cqe = (struct bnx2i_tmf_response *)cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = tmf_cqe->op_code; + resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn); + resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn); + resp_hdr->itt = task->hdr->itt; + resp_hdr->response = tmf_cqe->response; + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); +done: + spin_unlock(&session->lock); + return 0; +} + +/** + * bnx2i_process_logout_resp - this function handles iscsi logout response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI Logout Response CQE & make function call to + * notify the user daemon. 
+ */ +static int bnx2i_process_logout_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_logout_response *logout; + struct iscsi_logout_rsp *resp_hdr; + + logout = (struct bnx2i_logout_response *) cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = logout->op_code; + resp_hdr->flags = logout->response; + resp_hdr->hlength = 0; + + resp_hdr->itt = task->hdr->itt; + resp_hdr->statsn = task->hdr->exp_statsn; + resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn); + + resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait); + resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain); + + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); +done: + spin_unlock(&session->lock); + return 0; +} + +/** + * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI NOPIN local completion CQE, frees ITT and command structures + */ +static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_nop_in_msg *nop_in; + struct iscsi_task *task; + + nop_in = (struct bnx2i_nop_in_msg *)cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + nop_in->itt & ISCSI_NOP_IN_MSG_INDEX); + if (task) + iscsi_put_task(task); + spin_unlock(&session->lock); +} + +/** + * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd + * @bnx2i_conn: iscsi connection + * + * Firmware advances RQ producer index for every unsolicited PDU even if + * payload data length is '0'. 
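+ * For example (a sketch of the helper body that follows), even a + * zero-payload unsolicited NOP-IN consumes one RQ WQE, so the driver reads + * and immediately returns a one-byte dummy buffer: + * + * char dummy[2]; + * bnx2i_get_rq_buf(bnx2i_conn, dummy, 1); + * bnx2i_put_rq_buf(bnx2i_conn, 1); + *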
This function makes corresponding + * adjustments on the driver side to match this f/w behavior + */ +static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn) +{ + char dummy_rq_data[2]; + bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1); + bnx2i_put_rq_buf(bnx2i_conn, 1); +} + + +/** + * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI target's proactive iSCSI NOPIN request + */ +static int bnx2i_process_nopin_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_nop_in_msg *nop_in; + struct iscsi_nopin *hdr; + u32 itt; + int tgt_async_nop = 0; + + nop_in = (struct bnx2i_nop_in_msg *)cqe; + itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX; + + spin_lock(&session->lock); + hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = nop_in->op_code; + hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn); + hdr->ttt = cpu_to_be32(nop_in->ttt); + + if (itt == (u16) RESERVED_ITT) { + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + hdr->itt = RESERVED_ITT; + tgt_async_nop = 1; + goto done; + } + + /* this is a response to one of our nop-outs */ + task = iscsi_itt_to_task(conn, itt); + if (task) { + hdr->flags = ISCSI_FLAG_CMD_FINAL; + hdr->itt = task->hdr->itt; + hdr->ttt = cpu_to_be32(nop_in->ttt); + memcpy(hdr->lun, nop_in->lun, 8); + } +done: + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); + spin_unlock(&session->lock); + + return tgt_async_nop; +} + + +/** + * bnx2i_process_async_mesg - this function handles iscsi async message + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI ASYNC Message + */ +static void bnx2i_process_async_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct bnx2i_async_msg *async_cqe; + struct iscsi_async *resp_hdr; + u8 async_event; + + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + + async_cqe = (struct bnx2i_async_msg *)cqe; + async_event = async_cqe->async_event; + + if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) { + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, + "async: scsi events not supported\n"); + return; + } + + spin_lock(&session->lock); + resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = async_cqe->op_code; + resp_hdr->flags = 0x80; + + memcpy(resp_hdr->lun, async_cqe->lun, 8); + resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn); + + resp_hdr->async_event = async_cqe->async_event; + resp_hdr->async_vcode = async_cqe->async_vcode; + + resp_hdr->param1 = cpu_to_be16(async_cqe->param1); + resp_hdr->param2 = cpu_to_be16(async_cqe->param2); + resp_hdr->param3 = cpu_to_be16(async_cqe->param3); + + __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data, + (struct iscsi_hdr *)resp_hdr, NULL, 0); + spin_unlock(&session->lock); +} + + +/** + * bnx2i_process_reject_mesg - process iscsi reject pdu + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + 
* + * process iSCSI REJECT message + */ +static void bnx2i_process_reject_mesg(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct bnx2i_reject_msg *reject; + struct iscsi_reject *hdr; + + reject = (struct bnx2i_reject_msg *) cqe; + if (reject->data_length) { + bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length); + bnx2i_put_rq_buf(bnx2i_conn, 1); + } else + bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); + + spin_lock(&session->lock); + hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr; + memset(hdr, 0, sizeof(struct iscsi_hdr)); + hdr->opcode = reject->op_code; + hdr->reason = reject->reason; + hton24(hdr->dlength, reject->data_length); + hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn); + hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn); + hdr->ffffffff = cpu_to_be32(RESERVED_ITT); + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, + reject->data_length); + spin_unlock(&session->lock); +} + +/** + * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process command cleanup response CQE during conn shutdown or error recovery + */ +static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct bnx2i_cleanup_response *cmd_clean_rsp; + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + + cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, + cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); + if (!task) + printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n", + cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); + spin_unlock(&session->lock); + complete(&bnx2i_conn->cmd_cleanup_cmpl); +} + + + +/** + * bnx2i_process_new_cqes - process newly DMA'ed CQE's + * @bnx2i_conn: iscsi connection + * + * this function is called by generic KCQ handler to process all pending CQE's + */ +static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_session *session = conn->session; + struct qp_info *qp = &bnx2i_conn->ep->qp; + struct bnx2i_nop_in_msg *nopin; + int tgt_async_msg; + + while (1) { + nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; + if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) + break; + + if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) + break; + + tgt_async_msg = 0; + + switch (nopin->op_code) { + case ISCSI_OP_SCSI_CMD_RSP: + case ISCSI_OP_SCSI_DATA_IN: + bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_LOGIN_RSP: + bnx2i_process_login_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_SCSI_TMFUNC_RSP: + bnx2i_process_tmf_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_LOGOUT_RSP: + bnx2i_process_logout_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_NOOP_IN: + if (bnx2i_process_nopin_mesg(session, bnx2i_conn, + qp->cq_cons_qe)) + tgt_async_msg = 1; + break; + case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION: + bnx2i_process_nopin_local_cmpl(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OP_ASYNC_EVENT: + bnx2i_process_async_mesg(session, bnx2i_conn, + qp->cq_cons_qe); + tgt_async_msg = 1; + break; + case 
ISCSI_OP_REJECT: + bnx2i_process_reject_mesg(session, bnx2i_conn, + qp->cq_cons_qe); + break; + case ISCSI_OPCODE_CLEANUP_RESPONSE: + bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; + default: + printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", + nopin->op_code); + } + + if (!tgt_async_msg) + bnx2i_conn->ep->num_active_cmds--; + + /* A production version should clear out the opcode here; until beta, + * the opcode field is kept intact as it is helpful when debugging + * (context dump). + * nopin->op_code = 0; + */ + qp->cqe_exp_seq_sn++; + if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) + qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; + + if (qp->cq_cons_qe == qp->cq_last_qe) { + qp->cq_cons_qe = qp->cq_first_qe; + qp->cq_cons_idx = 0; + } else { + qp->cq_cons_qe++; + qp->cq_cons_idx++; + } + } + bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); +} + +/** + * bnx2i_fastpath_notification - process global event queue (KCQ) + * @hba: adapter structure pointer + * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry + * + * Fast path event notification handler, KCQ entry carries context id + * of the connection that has 1 or more pending CQ entries + */ +static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, + struct iscsi_kcqe *new_cqe_kcqe) +{ + struct bnx2i_conn *conn; + u32 iscsi_cid; + + iscsi_cid = new_cqe_kcqe->iscsi_conn_id; + conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!conn) { + printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); + return; + } + if (!conn->ep) { + printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); + return; + } + + bnx2i_process_new_cqes(conn); +} + + +/** + * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE + * @hba: adapter structure pointer + * @update_kcqe: kcqe pointer + * + * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration + */ +static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba, + struct iscsi_kcqe *update_kcqe) +{ + struct bnx2i_conn *conn; + u32 iscsi_cid; + + iscsi_cid = update_kcqe->iscsi_conn_id; + conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!conn) { + printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid); + return; + } + if (!conn->ep) { + printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid); + return; + } + + if (update_kcqe->completion_status) { + printk(KERN_ALERT "request failed cid %x\n", iscsi_cid); + conn->ep->state = EP_STATE_ULP_UPDATE_FAILED; + } else + conn->ep->state = EP_STATE_ULP_UPDATE_COMPL; + + wake_up_interruptible(&conn->ep->ofld_wait); +} + + +/** + * bnx2i_recovery_que_add_conn - add connection to recovery queue + * @hba: adapter structure pointer + * @bnx2i_conn: iscsi connection + * + * Schedules recovery for the connection by reporting ISCSI_ERR_CONN_FAILED + * to libiscsi + */ +static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn) +{ + iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data, + ISCSI_ERR_CONN_FAILED); +} + + +/** + * bnx2i_process_tcp_error - process error notification on a given connection + * @hba: adapter structure pointer + * @tcp_err: tcp error kcqe pointer + * + * handles TCP-level error notifications from the FW. 
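+ * + * A minimal sketch of the flow, assuming the KCQE names a valid cid: + * + * bnx2i_conn = bnx2i_get_conn_from_id(hba, tcp_err->iscsi_conn_id); + * if (bnx2i_conn) + * bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);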
+ */ +static void bnx2i_process_tcp_error(struct bnx2i_hba *hba, + struct iscsi_kcqe *tcp_err) +{ + struct bnx2i_conn *bnx2i_conn; + u32 iscsi_cid; + + iscsi_cid = tcp_err->iscsi_conn_id; + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + + if (!bnx2i_conn) { + printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); + return; + } + + printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n", + iscsi_cid, tcp_err->completion_status); + bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); +} + + +/** + * bnx2i_process_iscsi_error - process error notification on a given connection + * @hba: adapter structure pointer + * @iscsi_err: iscsi error kcqe pointer + * + * handles iSCSI error notifications from the FW. Based on the initial + * handshake, firmware classifies iSCSI protocol / TCP RFC violations into + * either warning or error indications. If the indication is of "Error" type, + * the driver will initiate session recovery for that connection/session. For + * a "Warning" type indication, the driver will put out a system log message + * (there will be only one message for each type for the life of the + * session, to avoid unnecessarily overloading the system) + */ +static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba, + struct iscsi_kcqe *iscsi_err) +{ + struct bnx2i_conn *bnx2i_conn; + u32 iscsi_cid; + char warn_notice[] = "iscsi_warning"; + char error_notice[] = "iscsi_error"; + char additional_notice[64]; + char *message; + int need_recovery; + u64 err_mask64; + + iscsi_cid = iscsi_err->iscsi_conn_id; + bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); + if (!bnx2i_conn) { + printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); + return; + } + + err_mask64 = (0x1ULL << iscsi_err->completion_status); + + if (err_mask64 & iscsi_error_mask) { + need_recovery = 0; + message = warn_notice; + } else { + need_recovery = 1; + message = error_notice; + } + + switch (iscsi_err->completion_status) { + case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR: + strcpy(additional_notice, "hdr digest err"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR: + strcpy(additional_notice, "data digest err"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE: + strcpy(additional_notice, "wrong opcode rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN: + strcpy(additional_notice, "AHS len > 0 rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT: + strcpy(additional_notice, "invalid ITT rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN: + strcpy(additional_notice, "wrong StatSN rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN: + strcpy(additional_notice, "wrong DataSN rcvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T: + strcpy(additional_notice, "pend R2T violation"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0: + strcpy(additional_notice, "ERL0, UO"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1: + strcpy(additional_notice, "ERL0, U1"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2: + strcpy(additional_notice, "ERL0, U2"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3: + strcpy(additional_notice, "ERL0, U3"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4: + strcpy(additional_notice, "ERL0, U4"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5: + strcpy(additional_notice, "ERL0, U5"); + break; + case 
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6: + strcpy(additional_notice, "ERL0, U6"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN: + strcpy(additional_notice, "invalid resi len"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN: + strcpy(additional_notice, "MRDSL violation"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO: + strcpy(additional_notice, "F-bit not set"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV: + strcpy(additional_notice, "invalid TTT"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN: + strcpy(additional_notice, "invalid DataSN"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN: + strcpy(additional_notice, "burst len violation"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF: + strcpy(additional_notice, "buf offset violation"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN: + strcpy(additional_notice, "invalid LUN field"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN: + strcpy(additional_notice, "invalid R2TSN field"); + break; +#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 + case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0: + strcpy(additional_notice, "invalid cmd len1"); + break; +#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 + case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1: + strcpy(additional_notice, "invalid cmd len2"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED: + strcpy(additional_notice, + "pend r2t exceeds MaxOutstandingR2T value"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV: + strcpy(additional_notice, "TTT is rsvd"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN: + strcpy(additional_notice, "MBL violation"); + break; +#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO + case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO: + strcpy(additional_notice, "data seg len != 0"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN: + strcpy(additional_notice, "reject pdu len error"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN: + strcpy(additional_notice, "async pdu len error"); + break; + case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN: + strcpy(additional_notice, "nopin pdu len error"); + break; +#define BNX2_ERR_PEND_R2T_IN_CLEANUP \ + ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP + case BNX2_ERR_PEND_R2T_IN_CLEANUP: + strcpy(additional_notice, "pend r2t in cleanup"); + break; + + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT: + strcpy(additional_notice, "IP fragments rcvd"); + break; + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS: + strcpy(additional_notice, "IP options error"); + break; + case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG: + strcpy(additional_notice, "urgent flag error"); + break; + default: + printk(KERN_ALERT "iscsi_err - unknown err %x\n", + iscsi_err->completion_status); + } + + if (need_recovery) { + iscsi_conn_printk(KERN_ALERT, + bnx2i_conn->cls_conn->dd_data, + "bnx2i: %s - %s\n", + message, additional_notice); + + iscsi_conn_printk(KERN_ALERT, + bnx2i_conn->cls_conn->dd_data, + "conn_err - hostno %d conn %p, " + "iscsi_cid %x cid %x\n", + bnx2i_conn->hba->shost->host_no, + bnx2i_conn, 
bnx2i_conn->ep->ep_iscsi_cid, + bnx2i_conn->ep->ep_cid); + bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); + } else + if (!test_and_set_bit(iscsi_err->completion_status, + (void *) &bnx2i_conn->violation_notified)) + iscsi_conn_printk(KERN_ALERT, + bnx2i_conn->cls_conn->dd_data, + "bnx2i: %s - %s\n", + message, additional_notice); +} + + +/** + * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion + * @hba: adapter structure pointer + * @conn_destroy: conn destroy kcqe pointer + * + * handles connection destroy completion request. + */ +static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba, + struct iscsi_kcqe *conn_destroy) +{ + struct bnx2i_endpoint *ep; + + ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id); + if (!ep) { + printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending " + "offload request, unexpected completion\n"); + return; + } + + if (hba != ep->hba) { + printk(KERN_ALERT "conn destroy- error hba mis-match\n"); + return; + } + + if (conn_destroy->completion_status) { + printk(KERN_ALERT "conn_destroy_cmpl: op failed\n"); + ep->state = EP_STATE_CLEANUP_FAILED; + } else + ep->state = EP_STATE_CLEANUP_CMPL; + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion + * @hba: adapter structure pointer + * @ofld_kcqe: conn offload kcqe pointer + * + * handles initial connection offload completion, ep_connect() thread is + * woken-up to continue with LLP connect process + */ +static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba, + struct iscsi_kcqe *ofld_kcqe) +{ + u32 cid_addr; + struct bnx2i_endpoint *ep; + u32 cid_num; + + ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id); + if (!ep) { + printk(KERN_ALERT "ofld_cmpl: no pend offload request\n"); + return; + } + + if (hba != ep->hba) { + printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n"); + return; + } + + if (ofld_kcqe->completion_status) { + if (ofld_kcqe->completion_status == + ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) + printk(KERN_ALERT "bnx2i: unable to allocate" + " iSCSI context resources\n"); + ep->state = EP_STATE_OFLD_FAILED; + } else { + ep->state = EP_STATE_OFLD_COMPL; + cid_addr = ofld_kcqe->iscsi_conn_context_id; + cid_num = bnx2i_get_cid_num(ep); + ep->ep_cid = cid_addr; + ep->qp.ctx_base = NULL; + } + wake_up_interruptible(&ep->ofld_wait); +} + +/** + * bnx2i_indicate_kcqe - generic KCQ event handler/dispatcher + * @context: adapter structure pointer + * @kcqe: array of pending KCQE entries + * @num_cqe: number of KCQE entries to process + * + * Generic KCQ event handler/dispatcher + */ +static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[], + u32 num_cqe) +{ + struct bnx2i_hba *hba = context; + int i = 0; + struct iscsi_kcqe *ikcqe = NULL; + + while (i < num_cqe) { + ikcqe = (struct iscsi_kcqe *) kcqe[i++]; + + if (ikcqe->op_code == + ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION) + bnx2i_fastpath_notification(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN) + bnx2i_process_ofld_cmpl(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN) + bnx2i_process_update_conn_cmpl(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) { + if (ikcqe->completion_status != + ISCSI_KCQE_COMPLETION_STATUS_SUCCESS) + bnx2i_iscsi_license_error(hba, ikcqe->completion_status); + else { + set_bit(ADAPTER_STATE_UP, &hba->adapter_state); + bnx2i_get_link_state(hba); + printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: " + "ISCSI_INIT passed\n", + (u8)hba->pcidev->bus->number, 
hba->pci_devno, + (u8)hba->pci_func); + } + } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN) + bnx2i_process_conn_destroy_cmpl(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR) + bnx2i_process_iscsi_error(hba, ikcqe); + else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR) + bnx2i_process_tcp_error(hba, ikcqe); + else + printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", + ikcqe->op_code); + } +} + + +/** + * bnx2i_indicate_netevent - Generic netdev event handler + * @context: adapter structure pointer + * @event: event type + * + * Handles four netdev events, NETDEV_UP, NETDEV_DOWN, + * NETDEV_GOING_DOWN and NETDEV_CHANGE + */ +static void bnx2i_indicate_netevent(void *context, unsigned long event) +{ + struct bnx2i_hba *hba = context; + + switch (event) { + case NETDEV_UP: + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) + bnx2i_send_fw_iscsi_init_msg(hba); + break; + case NETDEV_DOWN: + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + break; + case NETDEV_GOING_DOWN: + set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + iscsi_host_for_each_session(hba->shost, + bnx2i_drop_session); + break; + case NETDEV_CHANGE: + bnx2i_get_link_state(hba); + break; + default: + ; + } +} + + +/** + * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 TCP connect request. + */ +static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) + ep->state = EP_STATE_CONNECT_FAILED; + else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags)) + ep->state = EP_STATE_CONNECT_COMPL; + else + ep->state = EP_STATE_CONNECT_FAILED; + + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_close_cmpl - process tcp conn close completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 graceful TCP connect shutdown + */ +static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_DISCONN_COMPL; + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate completion of option-2 abortive TCP connect termination + */ +static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_DISCONN_COMPL; + wake_up_interruptible(&ep->ofld_wait); +} + + +/** + * bnx2i_cm_remote_close - process received TCP FIN + * @cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to indicate + * async TCP events such as FIN + */ +static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_TCP_FIN_RCVD; + if (ep->conn) + bnx2i_recovery_que_add_conn(ep->hba, ep->conn); +} + +/** + * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup + * 
@cm_sk: cnic sock structure pointer + * + * function callback exported via bnx2i - cnic driver interface to + * indicate async TCP events (RST) sent by the peer. + */ +static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk) +{ + struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context; + + ep->state = EP_STATE_TCP_RST_RCVD; + if (ep->conn) + bnx2i_recovery_que_add_conn(ep->hba, ep->conn); +} + + +static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type, + char *buf, u16 buflen) +{ + struct bnx2i_hba *hba; + + hba = bnx2i_find_hba_for_cnic(dev); + if (!hba) + return; + + if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport, + msg_type, buf, buflen)) + printk(KERN_ALERT "bnx2i: private nl message send error\n"); +} + + +/** + * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure + * carrying callback function pointers + */ +struct cnic_ulp_ops bnx2i_cnic_cb = { + .cnic_init = bnx2i_ulp_init, + .cnic_exit = bnx2i_ulp_exit, + .cnic_start = bnx2i_start, + .cnic_stop = bnx2i_stop, + .indicate_kcqes = bnx2i_indicate_kcqe, + .indicate_netevent = bnx2i_indicate_netevent, + .cm_connect_complete = bnx2i_cm_connect_cmpl, + .cm_close_complete = bnx2i_cm_close_cmpl, + .cm_abort_complete = bnx2i_cm_abort_cmpl, + .cm_remote_close = bnx2i_cm_remote_close, + .cm_remote_abort = bnx2i_cm_remote_abort, + .iscsi_nl_send_msg = bnx2i_send_nl_mesg, + .owner = THIS_MODULE +}; + + +/** + * bnx2i_map_ep_dbell_regs - map connection doorbell registers + * @ep: bnx2i endpoint + * + * maps the connection's SQ and RQ doorbell registers. 5706/5708/5709 host + * these registers in BAR #0, whereas on 57710 they are accessed by + * mapping BAR #1 + */ +int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) +{ + u32 cid_num; + u32 reg_off; + u32 first_l4l5; + u32 ctx_sz; + u32 config2; + resource_size_t reg_base; + + cid_num = bnx2i_get_cid_num(ep); + + if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { + reg_base = pci_resource_start(ep->hba->pcidev, + BNX2X_DOORBELL_PCI_BAR); + reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE; + ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); + goto arm_cq; + } + + reg_base = ep->hba->netdev->base_addr; + if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) && + (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) { + config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2); + first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5; + ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3; + if (ctx_sz) + reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE + + PAGE_SIZE * + (((cid_num - first_l4l5) / ctx_sz) + 256); + else + reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); + } else + /* 5709 device in normal mode and 5706/5708 devices */ + reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num); + + ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, + MB_KERNEL_CTX_SIZE); + if (!ep->qp.ctx_base) + return -ENOMEM; + +arm_cq: + bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE); + return 0; +} diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c new file mode 100644 index 000000000000..ae4b2d588fd3 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -0,0 +1,438 @@ +/* bnx2i.c: Broadcom NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 
+ * Copyright (c) 2007, 2008 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ + +#include "bnx2i.h" + +static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); +static u32 adapter_count; +static int bnx2i_reg_device; + +#define DRV_MODULE_NAME "bnx2i" +#define DRV_MODULE_VERSION "2.0.1d" +#define DRV_MODULE_RELDATE "Mar 25, 2009" + +static char version[] __devinitdata = + "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + + +MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>"); +MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +static DEFINE_RWLOCK(bnx2i_dev_lock); + +unsigned int event_coal_div = 1; +module_param(event_coal_div, int, 0664); +MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor"); + +unsigned int en_tcp_dack = 1; +module_param(en_tcp_dack, int, 0664); +MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK"); + +unsigned int error_mask1 = 0x00; +module_param(error_mask1, int, 0664); +MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1"); + +unsigned int error_mask2 = 0x00; +module_param(error_mask2, int, 0664); +MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2"); + +unsigned int sq_size; +module_param(sq_size, int, 0664); +MODULE_PARM_DESC(sq_size, "Configure SQ size"); + +unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT; +module_param(rq_size, int, 0664); +MODULE_PARM_DESC(rq_size, "Configure RQ size"); + +u64 iscsi_error_mask = 0x00; + +static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) ; + + +/** + * bnx2i_identify_device - identifies NetXtreme II device type + * @hba: Adapter structure pointer + * + * This function identifies the NX2 device type and sets appropriate + * queue mailbox register access method, 5709 requires driver to + * access MBOX regs using *bin* mode + */ +void bnx2i_identify_device(struct bnx2i_hba *hba) +{ + hba->cnic_dev_type = 0; + if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) || + (hba->pci_did == PCI_DEVICE_ID_NX2_5706S)) + set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type); + else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) || + (hba->pci_did == PCI_DEVICE_ID_NX2_5708S)) + set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type); + else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) || + (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) { + set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); + hba->mail_queue_access = BNX2I_MQ_BIN_MODE; + } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || + hba->pci_did == PCI_DEVICE_ID_NX2_57711) + set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); +} + + +/** + * get_adapter_list_head - returns head of adapter list + */ +struct bnx2i_hba *get_adapter_list_head(void) +{ + struct bnx2i_hba *hba = NULL; + struct bnx2i_hba *tmp_hba; + + if (!adapter_count) + goto hba_not_found; + + read_lock(&bnx2i_dev_lock); + list_for_each_entry(tmp_hba, &adapter_list, link) { + if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) { + hba = tmp_hba; + break; + } + } + read_unlock(&bnx2i_dev_lock); +hba_not_found: + return hba; +} + + +/** + * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance + * @cnic: pointer to cnic device instance + * + */ +struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev 
*cnic) +{ + struct bnx2i_hba *hba, *temp; + + read_lock(&bnx2i_dev_lock); + list_for_each_entry_safe(hba, temp, &adapter_list, link) { + if (hba->cnic == cnic) { + read_unlock(&bnx2i_dev_lock); + return hba; + } + } + read_unlock(&bnx2i_dev_lock); + return NULL; +} + + +/** + * bnx2i_start - cnic callback to initialize & start adapter instance + * @handle: transparent handle pointing to adapter structure + * + * This function maps adapter structure to pcidev structure and initiates + * firmware handshake to enable/initialize on-chip iSCSI components. + * This bnx2i - cnic interface api callback is issued after following + * 2 conditions are met - + * a) underlying network interface is up (marked by event 'NETDEV_UP' + * from netdev) + * b) bnx2i adapter instance is registered + */ +void bnx2i_start(void *handle) +{ +#define BNX2I_INIT_POLL_TIME (1000 / HZ) + struct bnx2i_hba *hba = handle; + int i = HZ; + + bnx2i_send_fw_iscsi_init_msg(hba); + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) + msleep(BNX2I_INIT_POLL_TIME); +} + + +/** + * bnx2i_stop - cnic callback to shutdown adapter instance + * @handle: transparent handle pointing to adapter structure + * + * driver checks if adapter is already in shutdown mode, if not start + * the shutdown process + */ +void bnx2i_stop(void *handle) +{ + struct bnx2i_hba *hba = handle; + + /* check if cleanup happened in GOING_DOWN context */ + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN, + &hba->adapter_state)) + iscsi_host_for_each_session(hba->shost, + bnx2i_drop_session); +} + +/** + * bnx2i_register_device - register bnx2i adapter instance with the cnic driver + * @hba: Adapter instance to register + * + * registers bnx2i adapter instance with the cnic driver while holding the + * adapter structure lock + */ +void bnx2i_register_device(struct bnx2i_hba *hba) +{ + if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || + test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + return; + } + + hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba); + + spin_lock(&hba->lock); + bnx2i_reg_device++; + spin_unlock(&hba->lock); + + set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); +} + + +/** + * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver + * + * registers all bnx2i adapter instances with the cnic driver while holding + * the global resource lock + */ +void bnx2i_reg_dev_all(void) +{ + struct bnx2i_hba *hba, *temp; + + read_lock(&bnx2i_dev_lock); + list_for_each_entry_safe(hba, temp, &adapter_list, link) + bnx2i_register_device(hba); + read_unlock(&bnx2i_dev_lock); +} + + +/** + * bnx2i_unreg_one_device - unregister adapter instance from the cnic driver + * @hba: Adapter instance to unregister + * + * unregisters the bnx2i adapter instance from the cnic driver while holding + * the adapter structure lock + */ +static void bnx2i_unreg_one_device(struct bnx2i_hba *hba) +{ + if (hba->ofld_conns_active || + !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) || + test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state)) + return; + + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + + spin_lock(&hba->lock); + bnx2i_reg_device--; + spin_unlock(&hba->lock); + + /* ep_disconnect could come before NETDEV_DOWN, driver won't + * see NETDEV_DOWN as it already unregistered itself. 
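+ * + * Callers pair this helper with bnx2i_register_device(); for example, + * bnx2i_unreg_dev_all() below simply walks adapter_list under + * bnx2i_dev_lock and invokes this routine once per hba.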
+ */ + hba->adapter_state = 0; + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); +} + +/** + * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver + * + * unregisters all bnx2i adapter instances with the cnic driver while holding + * the global resource lock + */ +void bnx2i_unreg_dev_all(void) +{ + struct bnx2i_hba *hba, *temp; + + read_lock(&bnx2i_dev_lock); + list_for_each_entry_safe(hba, temp, &adapter_list, link) + bnx2i_unreg_one_device(hba); + read_unlock(&bnx2i_dev_lock); +} + + +/** + * bnx2i_init_one - initialize an adapter instance and allocate memory resources + * @hba: bnx2i adapter instance + * @cnic: cnic device handle + * + * Global resource lock and host adapter lock are held during critical sections + * below. This routine is called from cnic_register_driver() context and + * the workhorse thread which does the majority of device specific + * initialization + */ +static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) +{ + int rc; + + read_lock(&bnx2i_dev_lock); + if (bnx2i_reg_device && + !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); + if (rc) /* duplicate registration */ + printk(KERN_ERR "bnx2i- dev reg failed\n"); + + spin_lock(&hba->lock); + bnx2i_reg_device++; + hba->age++; + spin_unlock(&hba->lock); + + set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + } + read_unlock(&bnx2i_dev_lock); + + write_lock(&bnx2i_dev_lock); + list_add_tail(&hba->link, &adapter_list); + adapter_count++; + write_unlock(&bnx2i_dev_lock); + return 0; +} + + +/** + * bnx2i_ulp_init - initialize an adapter instance + * @dev: cnic device handle + * + * Called from cnic_register_driver() context to initialize all enumerated + * cnic devices. This routine allocates the adapter structure and other + * device specific resources. + */ +void bnx2i_ulp_init(struct cnic_dev *dev) +{ + struct bnx2i_hba *hba; + + /* Allocate a HBA structure for this device */ + hba = bnx2i_alloc_hba(dev); + if (!hba) { + printk(KERN_ERR "bnx2i init: hba initialization failed\n"); + return; + } + + /* Get PCI related information and update hba struct members */ + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + if (bnx2i_init_one(hba, dev)) { + printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); + bnx2i_free_hba(hba); + } else + hba->cnic = dev; +} + + +/** + * bnx2i_ulp_exit - shuts down adapter instance and frees all resources + * @dev: cnic device handle + * + */ +void bnx2i_ulp_exit(struct cnic_dev *dev) +{ + struct bnx2i_hba *hba; + + hba = bnx2i_find_hba_for_cnic(dev); + if (!hba) { + printk(KERN_INFO "bnx2i_ulp_exit: hba not " + "found, dev 0x%p\n", dev); + return; + } + write_lock(&bnx2i_dev_lock); + list_del_init(&hba->link); + adapter_count--; + + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + + spin_lock(&hba->lock); + bnx2i_reg_device--; + spin_unlock(&hba->lock); + } + write_unlock(&bnx2i_dev_lock); + + bnx2i_free_hba(hba); +} + + +/** + * bnx2i_mod_init - module init entry point + * + * initialize any driver wide global data structures such as endpoint pool, + * tcp port manager/queue, sysfs. 
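+ * + * Registration order matters here: cnic_register_driver() may invoke + * bnx2i_ulp_init() right away, and host allocation needs the transport + * template, so in sketch form the body below does: + * + * iscsi_register_transport(&bnx2i_iscsi_transport); + * cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb); + *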
finally driver will register itself + * with the cnic module + */ +static int __init bnx2i_mod_init(void) +{ + int err; + + printk(KERN_INFO "%s", version); + + if (!is_power_of_2(sq_size)) + sq_size = roundup_pow_of_two(sq_size); + + bnx2i_scsi_xport_template = + iscsi_register_transport(&bnx2i_iscsi_transport); + if (!bnx2i_scsi_xport_template) { + printk(KERN_ERR "Could not register bnx2i transport.\n"); + err = -ENOMEM; + goto out; + } + + err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb); + if (err) { + printk(KERN_ERR "Could not register bnx2i cnic driver.\n"); + goto unreg_xport; + } + + return 0; + +unreg_xport: + iscsi_unregister_transport(&bnx2i_iscsi_transport); +out: + return err; +} + + +/** + * bnx2i_mod_exit - module cleanup/exit entry point + * + * Global resource lock and host adapter lock is held during critical sections + * in this function. Driver will browse through the adapter list, cleans-up + * each instance, unregisters iscsi transport name and finally driver will + * unregister itself with the cnic module + */ +static void __exit bnx2i_mod_exit(void) +{ + struct bnx2i_hba *hba; + + write_lock(&bnx2i_dev_lock); + while (!list_empty(&adapter_list)) { + hba = list_entry(adapter_list.next, struct bnx2i_hba, link); + list_del(&hba->link); + adapter_count--; + + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + bnx2i_reg_device--; + } + + write_unlock(&bnx2i_dev_lock); + bnx2i_free_hba(hba); + write_lock(&bnx2i_dev_lock); + } + write_unlock(&bnx2i_dev_lock); + + iscsi_unregister_transport(&bnx2i_iscsi_transport); + cnic_unregister_driver(CNIC_ULP_ISCSI); +} + +module_init(bnx2i_mod_init); +module_exit(bnx2i_mod_exit); diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c new file mode 100644 index 000000000000..f7412196f2f8 --- /dev/null +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -0,0 +1,2064 @@ +/* + * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. + * + * Copyright (c) 2006 - 2009 Broadcom Corporation + * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. + * Copyright (c) 2007, 2008 Mike Christie + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) + */ + +#include <scsi/scsi_tcq.h> +#include <scsi/libiscsi.h> +#include "bnx2i.h" + +struct scsi_transport_template *bnx2i_scsi_xport_template; +struct iscsi_transport bnx2i_iscsi_transport; +static struct scsi_host_template bnx2i_host_template; + +/* + * Global endpoint resource info + */ +static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ + + +static int bnx2i_adapter_ready(struct bnx2i_hba *hba) +{ + int retval = 0; + + if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || + test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || + test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) + retval = -EPERM; + return retval; +} + +/** + * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks + * @cmd: iscsi cmd struct pointer + * @buf_off: absolute buffer offset + * @start_bd_off: u32 pointer to return the offset within the BD + * indicated by 'start_bd_idx' on which 'buf_off' falls + * @start_bd_idx: index of the BD on which 'buf_off' falls + * + * identifies & marks various bd info for scsi command's imm data, + * unsolicited data and the first solicited data seq. + */ +static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off, + u32 *start_bd_off, u32 *start_bd_idx) +{ + struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl; + u32 cur_offset = 0; + u32 cur_bd_idx = 0; + + if (buf_off) { + while (buf_off >= (cur_offset + bd_tbl->buffer_length)) { + cur_offset += bd_tbl->buffer_length; + cur_bd_idx++; + bd_tbl++; + } + } + + *start_bd_off = buf_off - cur_offset; + *start_bd_idx = cur_bd_idx; +} + +/** + * bnx2i_setup_write_cmd_bd_info - sets up various BD information + * @task: transport layer's cmd struct pointer + * + * identifies & marks various bd info for scsi command's immediate data, + * unsolicited data and first solicited data seq which includes BD start + * index & BD buf off. This function takes into account iSCSI parameters + * such as whether immediate data and unsolicited data are supported on + * this connection. + */ +static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task) +{ + struct bnx2i_cmd *cmd = task->dd_data; + u32 start_bd_offset; + u32 start_bd_idx; + u32 buffer_offset = 0; + u32 cmd_len = cmd->req.total_data_transfer_length; + + /* if ImmediateData is turned off & InitialR2T is turned on, + * there will be no immediate or unsolicited data, just return. 
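+ * + * Worked example (numbers illustrative): with 4K of immediate data + * (imm_count), 8K of unsolicited data and cmd_len = 64K, the + * unsolicited-data bookmark is taken at buffer offset 4K and the first + * solicited sequence at offset 12K, each resolved to a BD index/offset + * pair by bnx2i_get_write_cmd_bd_idx().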
+ */ + if (!iscsi_task_has_unsol_data(task) && !task->imm_count) + return; + + /* Immediate data */ + buffer_offset += task->imm_count; + if (task->imm_count == cmd_len) + return; + + if (iscsi_task_has_unsol_data(task)) { + bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, + &start_bd_offset, &start_bd_idx); + cmd->req.ud_buffer_offset = start_bd_offset; + cmd->req.ud_start_bd_index = start_bd_idx; + buffer_offset += task->unsol_r2t.data_length; + } + + if (buffer_offset != cmd_len) { + bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, + &start_bd_offset, &start_bd_idx); + if ((start_bd_offset > task->conn->session->first_burst) || + (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) { + int i = 0; + + iscsi_conn_printk(KERN_ALERT, task->conn, + "bnx2i- error, buf offset 0x%x " + "bd_valid %d use_sg %d\n", + buffer_offset, cmd->io_tbl.bd_valid, + scsi_sg_count(cmd->scsi_cmd)); + for (i = 0; i < cmd->io_tbl.bd_valid; i++) + iscsi_conn_printk(KERN_ALERT, task->conn, + "bnx2i err, bd[%d]: len %x\n", + i, cmd->io_tbl.bd_tbl[i].\ + buffer_length); + } + cmd->req.sd_buffer_offset = start_bd_offset; + cmd->req.sd_start_bd_index = start_bd_idx; + } +} + + + +/** + * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table + * @hba: adapter instance + * @cmd: iscsi cmd struct pointer + * + * map SG list + */ +static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int bd_count = 0; + int sg_count; + int sg_len; + u64 addr; + int i; + + BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD); + + sg_count = scsi_dma_map(sc); + + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = (u64) sg_dma_address(sg); + bd[bd_count].buffer_addr_lo = addr & 0xffffffff; + bd[bd_count].buffer_addr_hi = addr >> 32; + bd[bd_count].buffer_length = sg_len; + bd[bd_count].flags = 0; + if (bd_count == 0) + bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN; + + byte_count += sg_len; + bd_count++; + } + + if (bd_count) + bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN; + + BUG_ON(byte_count != scsi_bufflen(sc)); + return bd_count; +} + +/** + * bnx2i_iscsi_map_sg_list - maps SG list + * @cmd: iscsi cmd struct pointer + * + * creates BD list table for the command + */ +static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd) +{ + int bd_count; + + bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd); + if (!bd_count) { + struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; + + bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0; + bd[0].buffer_length = bd[0].flags = 0; + } + cmd->io_tbl.bd_valid = bd_count; +} + + +/** + * bnx2i_iscsi_unmap_sg_list - unmaps SG list + * @cmd: iscsi cmd struct pointer + * + * unmap IO buffers and invalidate the BD table + */ +void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd) +{ + struct scsi_cmnd *sc = cmd->scsi_cmd; + + if (cmd->io_tbl.bd_valid && sc) { + scsi_dma_unmap(sc); + cmd->io_tbl.bd_valid = 0; + } +} + +static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd) +{ + memset(&cmd->req, 0x00, sizeof(cmd->req)); + cmd->req.op_code = 0xFF; + cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma; + cmd->req.bd_list_addr_hi = + (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32); + +} + + +/** + * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid' + * @hba: pointer to adapter instance + * @conn: pointer to iscsi connection + * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) + * + * update iscsi cid table entry 
with connection pointer. This enables + * driver to quickly get hold of connection structure pointer in + * completion/interrupt thread using iscsi context ID + */ +static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, + struct bnx2i_conn *bnx2i_conn, + u32 iscsi_cid) +{ + if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { + iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, + "conn bind - entry #%d not free\n", iscsi_cid); + return -EBUSY; + } + + hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; + return 0; +} + + +/** + * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr + * @hba: pointer to adapter instance + * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) + */ +struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, + u16 iscsi_cid) +{ + if (!hba->cid_que.conn_cid_tbl) { + printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n"); + return NULL; + + } else if (iscsi_cid >= hba->max_active_conns) { + printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid); + return NULL; + } + return hba->cid_que.conn_cid_tbl[iscsi_cid]; +} + + +/** + * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool + * @hba: pointer to adapter instance + */ +static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) +{ + int idx; + + if (!hba->cid_que.cid_free_cnt) + return -1; + + idx = hba->cid_que.cid_q_cons_idx; + hba->cid_que.cid_q_cons_idx++; + if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) + hba->cid_que.cid_q_cons_idx = 0; + + hba->cid_que.cid_free_cnt--; + return hba->cid_que.cid_que[idx]; +} + + +/** + * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free pool + * @hba: pointer to adapter instance + * @iscsi_cid: iscsi context ID to free + */ +static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) +{ + int idx; + + if (iscsi_cid == (u16) -1) + return; + + hba->cid_que.cid_free_cnt++; + + idx = hba->cid_que.cid_q_prod_idx; + hba->cid_que.cid_que[idx] = iscsi_cid; + hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; + hba->cid_que.cid_q_prod_idx++; + if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) + hba->cid_que.cid_q_prod_idx = 0; +} + + +/** + * bnx2i_setup_free_cid_que - sets up free iscsi cid queue + * @hba: pointer to adapter instance + * + * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table, + * and initialize table attributes + */ +static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) +{ + int mem_size; + int i; + + mem_size = hba->max_active_conns * sizeof(u32); + mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); + if (!hba->cid_que.cid_que_base) + return -ENOMEM; + + mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); + mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); + if (!hba->cid_que.conn_cid_tbl) { + kfree(hba->cid_que.cid_que_base); + hba->cid_que.cid_que_base = NULL; + return -ENOMEM; + } + + hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; + hba->cid_que.cid_q_prod_idx = 0; + hba->cid_que.cid_q_cons_idx = 0; + hba->cid_que.cid_q_max_idx = hba->max_active_conns; + hba->cid_que.cid_free_cnt = hba->max_active_conns; + + for (i = 0; i < hba->max_active_conns; i++) { + hba->cid_que.cid_que[i] = i; + hba->cid_que.conn_cid_tbl[i] = NULL; + } + return 0; +} + + +/** + * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources + * @hba: pointer to adapter instance + */ +static void bnx2i_release_free_cid_que(struct 
bnx2i_hba *hba) +{ + kfree(hba->cid_que.cid_que_base); + hba->cid_que.cid_que_base = NULL; + + kfree(hba->cid_que.conn_cid_tbl); + hba->cid_que.conn_cid_tbl = NULL; +} + + +/** + * bnx2i_alloc_ep - allocates ep structure from global pool + * @hba: pointer to adapter instance + * + * routine allocates a free endpoint structure from the global pool. The + * global resource lock, 'bnx2i_resc_lock', is held while accessing shared + * global data structures + */ +static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) +{ + struct iscsi_endpoint *ep; + struct bnx2i_endpoint *bnx2i_ep; + + ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); + if (!ep) { + printk(KERN_ERR "bnx2i: Could not allocate ep\n"); + return NULL; + } + + bnx2i_ep = ep->dd_data; + INIT_LIST_HEAD(&bnx2i_ep->link); + bnx2i_ep->state = EP_STATE_IDLE; + bnx2i_ep->hba = hba; + bnx2i_ep->hba_age = hba->age; + hba->ofld_conns_active++; + init_waitqueue_head(&bnx2i_ep->ofld_wait); + return ep; +} + + +/** + * bnx2i_free_ep - free endpoint + * @ep: pointer to iscsi endpoint structure + */ +static void bnx2i_free_ep(struct iscsi_endpoint *ep) +{ + struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; + unsigned long flags; + + spin_lock_irqsave(&bnx2i_resc_lock, flags); + bnx2i_ep->state = EP_STATE_IDLE; + bnx2i_ep->hba->ofld_conns_active--; + + bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); + if (bnx2i_ep->conn) { + bnx2i_ep->conn->ep = NULL; + bnx2i_ep->conn = NULL; + } + + bnx2i_ep->hba = NULL; + spin_unlock_irqrestore(&bnx2i_resc_lock, flags); + iscsi_destroy_endpoint(ep); +} + + +/** + * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command + * @hba: adapter instance pointer + * @session: iscsi session pointer + * @cmd: iscsi command structure + */ +static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, + struct bnx2i_cmd *cmd) +{ + struct io_bdt *io = &cmd->io_tbl; + struct iscsi_bd *bd; + + io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + ISCSI_MAX_BDS_PER_CMD * sizeof(*bd), + &io->bd_tbl_dma, GFP_KERNEL); + if (!io->bd_tbl) { + iscsi_session_printk(KERN_ERR, session, "Could not " + "allocate bdt.\n"); + return -ENOMEM; + } + io->bd_valid = 0; + return 0; +} + +/** + * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table + * @hba: adapter instance pointer + * @session: iscsi session pointer + */ +static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct bnx2i_cmd *cmd = task->dd_data; + + if (cmd->io_tbl.bd_tbl) + dma_free_coherent(&hba->pcidev->dev, + ISCSI_MAX_BDS_PER_CMD * + sizeof(struct iscsi_bd), + cmd->io_tbl.bd_tbl, + cmd->io_tbl.bd_tbl_dma); + } + +} + + +/** + * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session + * @hba: adapter instance pointer + * @session: iscsi session pointer + */ +static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, + struct iscsi_session *session) +{ + int i; + + for (i = 0; i < session->cmds_max; i++) { + struct iscsi_task *task = session->cmds[i]; + struct bnx2i_cmd *cmd = task->dd_data; + + task->hdr = &cmd->hdr; + task->hdr_max = sizeof(struct iscsi_hdr); + + if (bnx2i_alloc_bdt(hba, session, cmd)) + goto free_bdts; + } + + return 0; + +free_bdts: + bnx2i_destroy_cmd_pool(hba, session); + return -ENOMEM; +} + + +/** + * bnx2i_setup_mp_bdt - allocate BD table 
resources + * @hba: pointer to adapter structure + * + * Allocate memory for dummy buffer and associated BD + * table to be used by middle path (MP) requests + */ +static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) +{ + int rc = 0; + struct iscsi_bd *mp_bdt; + u64 addr; + + hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &hba->mp_bd_dma, GFP_KERNEL); + if (!hba->mp_bd_tbl) { + printk(KERN_ERR "unable to allocate Middle Path BDT\n"); + rc = -1; + goto out; + } + + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &hba->dummy_buf_dma, GFP_KERNEL); + if (!hba->dummy_buffer) { + printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->mp_bd_tbl, hba->mp_bd_dma); + hba->mp_bd_tbl = NULL; + rc = -1; + goto out; + } + + mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl; + addr = (unsigned long) hba->dummy_buf_dma; + mp_bdt->buffer_addr_lo = addr & 0xffffffff; + mp_bdt->buffer_addr_hi = addr >> 32; + mp_bdt->buffer_length = PAGE_SIZE; + mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | + ISCSI_BD_FIRST_IN_BD_CHAIN; +out: + return rc; +} + + +/** + * bnx2i_free_mp_bdt - frees the MP dummy buffer and associated BD table + * @hba: pointer to adapter instance + * + * free MP dummy buffer and associated BD table + */ +static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) +{ + if (hba->mp_bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->mp_bd_tbl, hba->mp_bd_dma); + hba->mp_bd_tbl = NULL; + } + if (hba->dummy_buffer) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->dummy_buffer, hba->dummy_buf_dma); + hba->dummy_buffer = NULL; + } + return; +} + +/** + * bnx2i_drop_session - notifies iscsid of connection error. + * @cls_session: iscsi cls session pointer + * + * This notifies iscsid that there is an error, so it can initiate + * recovery. + * + * This relies on caller using the iscsi class iterator so the object + * is refcounted and does not disappear from under us. 
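+ * + * Typical usage is through the iscsi class session iterator, as in the + * NETDEV_GOING_DOWN and bnx2i_stop() paths: + * + * iscsi_host_for_each_session(hba->shost, bnx2i_drop_session);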
+ */
+void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_destroy_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_del - remove an entry from EP destroy list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
+				     struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_add_tail(&ep->link, &hba->ep_ofld_list);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_del - remove an entry from ep offload pending list
+ * @hba: pointer to adapter instance
+ * @ep: pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
+				  struct bnx2i_endpoint *ep)
+{
+	write_lock_bh(&hba->ep_rdwr_lock);
+	list_del_init(&ep->link);
+	write_unlock_bh(&hba->ep_rdwr_lock);
+	return 0;
+}
+
+
+/**
+ * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to find
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
+		/* 'link' is the endpoint's first member, so the cast is safe */
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+	return ep;
+}
+
+
+/**
+ * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
+ * @hba: pointer to adapter instance
+ * @iscsi_cid: iscsi context ID to find
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2i_endpoint *ep;
+
+	read_lock_bh(&hba->ep_rdwr_lock);
+	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+		/* 'link' is the endpoint's first member, so the cast is safe */
+		ep = (struct bnx2i_endpoint *)list;
+
+		if (ep->ep_iscsi_cid == iscsi_cid)
+			break;
+		ep = NULL;
+	}
+	read_unlock_bh(&hba->ep_rdwr_lock);
+
+	if (!ep)
+		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+
+	return ep;
+}
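The two lookup helpers above cast the list_head pointer straight to a bnx2i_endpoint, which only works while 'link' remains the structure's first member. The same scan written with the offset-independent list iterator would look like this (an editor's sketch, not the driver's code):

	static struct bnx2i_endpoint *
	find_ep_sketch(struct bnx2i_hba *hba, u32 iscsi_cid)
	{
		struct bnx2i_endpoint *ep, *found = NULL;

		read_lock_bh(&hba->ep_rdwr_lock);
		list_for_each_entry(ep, &hba->ep_ofld_list, link) {
			if (ep->ep_iscsi_cid == iscsi_cid) {
				found = ep;	/* the iterator does the container_of */
				break;
			}
		}
		read_unlock_bh(&hba->ep_rdwr_lock);
		return found;
	}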
+
+/**
+ * bnx2i_setup_host_queue_size - assigns shost->can_queue param
+ * @hba: pointer to adapter instance
+ * @shost: scsi host pointer
+ *
+ * Initializes 'can_queue' parameter based on how many outstanding commands
+ * the device can handle.  Each device (5708/5709/57710) has different
+ * capabilities
+ */
+static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
+					struct Scsi_Host *shost)
+{
+	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
+	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
+	else
+		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+}
+
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ * @cnic: cnic device pointer
+ *
+ * allocate & initialize adapter structure and call other
+ * support routines to do per adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
+	if (!shost)
+		return NULL;
+	shost->dma_boundary = cnic->pcidev->dma_mask;
+	shost->transportt = bnx2i_scsi_xport_template;
+	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+	shost->max_channel = 0;
+	shost->max_lun = 512;
+	shost->max_cmd_len = 16;
+
+	hba = iscsi_host_priv(shost);
+	hba->shost = shost;
+	hba->netdev = cnic->netdev;
+	/* Get PCI related information and update hba struct members */
+	hba->pcidev = cnic->pcidev;
+	pci_dev_get(hba->pcidev);
+	hba->pci_did = hba->pcidev->device;
+	hba->pci_vid = hba->pcidev->vendor;
+	hba->pci_sdid = hba->pcidev->subsystem_device;
+	hba->pci_svid = hba->pcidev->subsystem_vendor;
+	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+
+	bnx2i_identify_device(hba);
+	bnx2i_setup_host_queue_size(hba, shost);
+
+	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr,
+					       BNX2_MQ_CONFIG2);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+		if (!hba->regview)
+			goto ioreg_map_err;
+	}
+
+	if (bnx2i_setup_mp_bdt(hba))
+		goto mp_bdt_mem_err;
+
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	rwlock_init(&hba->ep_rdwr_lock);
+
+	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+	/* different values for 5708/5709/57710 */
+	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+	if (bnx2i_setup_free_cid_que(hba))
+		goto cid_que_err;
+
+	/* SQ/RQ/CQ size can be changed via sysfs interface */
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
+	} else {	/* 5706/5708/5709 */
+		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
+			hba->max_sqes = sq_size;
+		else
+			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
+	}
+
+	hba->max_rqes = rq_size;
+	hba->max_cqes = hba->max_sqes + rq_size;
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
+			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
+	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
+		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
+
+	hba->num_ccell = hba->max_sqes / 2;
+
+	spin_lock_init(&hba->lock);
+	mutex_init(&hba->net_dev_lock);
+
+	if (iscsi_host_add(shost, &hba->pcidev->dev))
+		goto host_add_err;
+	return hba;
+
+host_add_err:
+	bnx2i_release_free_cid_que(hba);
+cid_que_err:
+	bnx2i_free_mp_bdt(hba);
+mp_bdt_mem_err:
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+ioreg_map_err:
+	pci_dev_put(hba->pcidev);
+	scsi_host_put(shost);
+	return NULL;
+}
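bnx2i_alloc_hba acquires its resources strictly in order and unwinds them in reverse through the label ladder above. Stripped of driver detail, the idiom is the following; acquire_*/release_* are hypothetical stand-ins for iscsi_host_alloc, ioremap, bnx2i_setup_mp_bdt and friends:

	static int acquire_a(void) { return 0; }	/* 0 == success, like the driver's helpers */
	static int acquire_b(void) { return 0; }
	static int acquire_c(void) { return -1; }	/* pretend the third step fails */
	static void release_a(void) { }
	static void release_b(void) { }

	static int ladder_sketch(void)
	{
		if (acquire_a())
			goto err_a;
		if (acquire_b())
			goto err_b;
		if (acquire_c())
			goto err_c;
		return 0;

	err_c:
		release_b();	/* undo in exact reverse order of acquisition */
	err_b:
		release_a();
	err_a:
		return -ENOMEM;
	}

Each label releases only what was already held when its step failed, which is why the labels sit between, not after, the release calls.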
+
+/**
+ * bnx2i_free_hba - releases hba structure and resources held by the adapter
+ * @hba: pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+	struct Scsi_Host *shost = hba->shost;
+
+	iscsi_host_remove(shost);
+	INIT_LIST_HEAD(&hba->ep_ofld_list);
+	INIT_LIST_HEAD(&hba->ep_destroy_list);
+	pci_dev_put(hba->pcidev);
+
+	if (hba->regview) {
+		iounmap(hba->regview);
+		hba->regview = NULL;
+	}
+	bnx2i_free_mp_bdt(hba);
+	bnx2i_release_free_cid_que(hba);
+	iscsi_host_free(shost);
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Login related resources, mostly BDT & payload DMA memory, are freed
+ */
+static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.resp_bd_tbl,
+				  bnx2i_conn->gen_pdu.resp_bd_dma);
+		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  bnx2i_conn->gen_pdu.req_bd_tbl,
+				  bnx2i_conn->gen_pdu.req_bd_dma);
+		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.resp_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.resp_buf,
+				  bnx2i_conn->gen_pdu.resp_dma_addr);
+		bnx2i_conn->gen_pdu.resp_buf = NULL;
+	}
+
+	if (bnx2i_conn->gen_pdu.req_buf) {
+		dma_free_coherent(&hba->pcidev->dev,
+				  ISCSI_DEF_MAX_RECV_SEG_LEN,
+				  bnx2i_conn->gen_pdu.req_buf,
+				  bnx2i_conn->gen_pdu.req_dma_addr);
+		bnx2i_conn->gen_pdu.req_buf = NULL;
+	}
+}
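All four branches of the free routine above repeat one guarded free-and-poison step; factored out it is just this (a sketch, the helper name is hypothetical):

	static void free_dma_buf_sketch(struct device *dev, size_t len,
					void **vaddr, dma_addr_t dma)
	{
		if (*vaddr) {
			dma_free_coherent(dev, len, *vaddr, dma);
			*vaddr = NULL;	/* poison so a second call is a no-op */
		}
	}

Clearing the pointer after the free is what lets bnx2i_conn_free_login_resources run safely from both the error path of the allocator and the normal connection-destroy path.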
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
+ * @hba: pointer to adapter instance
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Mgmt task DMA resources are allocated in this routine.
+ */
+static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
+					    struct bnx2i_conn *bnx2i_conn)
+{
+	/* Allocate memory for login request/response buffers */
+	bnx2i_conn->gen_pdu.req_buf =
+		dma_alloc_coherent(&hba->pcidev->dev,
+				   ISCSI_DEF_MAX_RECV_SEG_LEN,
+				   &bnx2i_conn->gen_pdu.req_dma_addr,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.req_buf == NULL)
+		goto login_req_buf_failure;
+
+	bnx2i_conn->gen_pdu.req_buf_size = 0;
+	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
+
+	bnx2i_conn->gen_pdu.resp_buf =
+		dma_alloc_coherent(&hba->pcidev->dev,
+				   ISCSI_DEF_MAX_RECV_SEG_LEN,
+				   &bnx2i_conn->gen_pdu.resp_dma_addr,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
+		goto login_resp_buf_failure;
+
+	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
+
+	bnx2i_conn->gen_pdu.req_bd_tbl =
+		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
+		goto login_req_bd_tbl_failure;
+
+	bnx2i_conn->gen_pdu.resp_bd_tbl =
+		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				   &bnx2i_conn->gen_pdu.resp_bd_dma,
+				   GFP_KERNEL);
+	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
+		goto login_resp_bd_tbl_failure;
+
+	return 0;
+
+login_resp_bd_tbl_failure:
+	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+			  bnx2i_conn->gen_pdu.req_bd_tbl,
+			  bnx2i_conn->gen_pdu.req_bd_dma);
+	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+			  bnx2i_conn->gen_pdu.resp_buf,
+			  bnx2i_conn->gen_pdu.resp_dma_addr);
+	bnx2i_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+			  bnx2i_conn->gen_pdu.req_buf,
+			  bnx2i_conn->gen_pdu.req_dma_addr);
+	bnx2i_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
+			  "login resource alloc failed!!\n");
+	return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
+ * @bnx2i_conn: iscsi connection pointer
+ *
+ * Fills in the request/response BD table entries before shipping to cnic
+ * the PDUs prepared by the 'iscsid' daemon
+ */
+static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
+{
+	struct iscsi_bd *bd_tbl;
+
+	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
+
+	bd_tbl->buffer_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
+	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
+	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
+				bnx2i_conn->gen_pdu.req_buf;
+	bd_tbl->reserved0 = 0;
+	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl;
+	bd_tbl->buffer_addr_hi =
+		(u32) ((u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32);
+	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
+	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
+	bd_tbl->reserved0 = 0;
+	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+			ISCSI_BD_FIRST_IN_BD_CHAIN;
+}
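Every BD carries its buffer's 64-bit DMA address as two 32-bit halves; the packing the two blocks above perform by hand is, in one place (editor's sketch, not a helper the driver defines):

	static void bd_set_buffer_sketch(struct iscsi_bd *bd, dma_addr_t dma, u32 len)
	{
		u64 addr = (u64) dma;

		bd->buffer_addr_lo = (u32) addr;		/* bits 31..0  */
		bd->buffer_addr_hi = (u32) (addr >> 32);	/* bits 63..32 */
		bd->buffer_length  = len;
	}

Note the request BD's length is computed from the write pointer, i.e. only the bytes iscsid actually wrote, while the response BD always advertises the full receive segment.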
+
+
+/**
+ * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
+ * @task: transport layer task pointer
+ *
+ * called to transmit PDUs prepared by the 'iscsid' daemon.  iSCSI login,
+ * Nop-out and Logout requests flow through this path.
+ */
+static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
+{
+	struct bnx2i_cmd *cmd = task->dd_data;
+	struct bnx2i_conn *bnx2i_conn = cmd->conn;
+	int rc = 0;
+	char *buf;
+	int data_len;
+
+	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
+	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+	case ISCSI_OP_LOGIN:
+		bnx2i_send_iscsi_login(bnx2i_conn, task);
+		break;
+	case ISCSI_OP_NOOP_OUT:
+		data_len = bnx2i_conn->gen_pdu.req_buf_size;
+		buf = bnx2i_conn->gen_pdu.req_buf;
+		if (data_len)
+			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+						     RESERVED_ITT,
+						     buf, data_len, 1);
+		else
+			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+						     RESERVED_ITT,
+						     NULL, 0, 1);
+		break;
+	case ISCSI_OP_LOGOUT:
+		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
+		break;
+	case ISCSI_OP_SCSI_TMFUNC:
+		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
+		break;
+	default:
+		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+				  "send_gen: unsupported op 0x%x\n",
+				  task->hdr->opcode);
+	}
+	return rc;
+}
+
+
+/**********************************************************************
+ *		SCSI-ML Interface
+ **********************************************************************/
+
+/**
+ * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
+ * @sc: SCSI-ML command pointer
+ * @cmd: iscsi cmd pointer
+ */
+static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
+{
+	u32 dword;
+	int lpcnt;
+	u8 *srcp;
+	u32 *dstp;
+	u32 scsi_lun[2];
+
+	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
+	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
+	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
+
+	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
+	srcp = (u8 *) sc->cmnd;
+	dstp = (u32 *) cmd->req.cdb;
+	while (lpcnt--) {
+		memcpy(&dword, (const void *) srcp, 4);
+		*dstp = cpu_to_be32(dword);
+		srcp += 4;
+		dstp++;
+	}
+	if (sc->cmd_len & 0x3) {
+		/* copy only the 1-3 valid trailing bytes, zero padded */
+		dword = 0;
+		memcpy(&dword, srcp, sc->cmd_len & 0x3);
+		*dstp = cpu_to_be32(dword);
+	}
+}
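On a little-endian host cpu_to_be32() is a byte swap, so the loop above stores each CDB dword byte-reversed, which is the layout the firmware expects. A standalone illustration of the same packing for an arbitrary-length CDB (plain C, assuming a little-endian host where cpu_to_be32 reduces to a swap):

	#include <stdint.h>
	#include <string.h>

	static void pack_cdb_sketch(const uint8_t *cdb, size_t len, uint32_t *out)
	{
		uint32_t dword;
		size_t i;

		for (i = 0; i + 4 <= len; i += 4) {
			memcpy(&dword, cdb + i, 4);
			out[i / 4] = __builtin_bswap32(dword);
		}
		if (len & 0x3) {		/* 1-3 trailing bytes, zero padded */
			dword = 0;
			memcpy(&dword, cdb + i, len & 0x3);
			out[i / 4] = __builtin_bswap32(dword);
		}
	}

For a 10-byte CDB this emits two swapped dwords plus one padded dword holding the final two bytes.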
+
+static void bnx2i_cleanup_task(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_hba *hba = bnx2i_conn->hba;
+
+	/*
+	 * mgmt task or cmd was never sent to us to transmit.
+	 */
+	if (!task->sc || task->state == ISCSI_TASK_PENDING)
+		return;
+	/*
+	 * need to clean-up task context to claim dma buffers
+	 */
+	if (task->state == ISCSI_TASK_ABRT_TMF) {
+		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
+
+		spin_unlock_bh(&conn->session->lock);
+		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
+				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+		spin_lock_bh(&conn->session->lock);
+	}
+	bnx2i_iscsi_unmap_sg_list(task->dd_data);
+}
+
+/**
+ * bnx2i_mtask_xmit - transmit mtask to chip for further processing
+ * @conn: transport layer conn structure pointer
+ * @task: transport layer command structure pointer
+ */
+static int
+bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct bnx2i_cmd *cmd = task->dd_data;
+
+	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+	bnx2i_setup_cmd_wqe_template(cmd);
+	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+	if (task->data_count) {
+		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
+		       task->data_count);
+		bnx2i_conn->gen_pdu.req_wr_ptr =
+			bnx2i_conn->gen_pdu.req_buf + task->data_count;
+	}
+	cmd->conn = conn->dd_data;
+	cmd->scsi_cmd = NULL;
+	return bnx2i_iscsi_send_generic_request(task);
+}
+
+/**
+ * bnx2i_task_xmit - transmit iscsi command to chip for further processing
+ * @task: transport layer command structure pointer
+ *
+ * maps SG buffers and sends the request to chip/firmware in the form of an
+ * SQ WQE
+ */
+static int bnx2i_task_xmit(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	struct iscsi_session *session = conn->session;
+	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct scsi_cmnd *sc = task->sc;
+	struct bnx2i_cmd *cmd = task->dd_data;
+	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+
+	if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+		return -ENOTCONN;
+
+	if (!bnx2i_conn->is_bound)
+		return -ENOTCONN;
+
+	/*
+	 * If there is no scsi_cmnd this must be a mgmt task
+	 */
+	if (!sc)
+		return bnx2i_mtask_xmit(conn, task);
+
+	bnx2i_setup_cmd_wqe_template(cmd);
+	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
+	cmd->conn = bnx2i_conn;
+	cmd->scsi_cmd = sc;
+	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
+	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
+
+	bnx2i_iscsi_map_sg_list(cmd);
+	bnx2i_cpy_scsi_cdb(sc, cmd);
+
+	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
+	if (sc->sc_data_direction == DMA_TO_DEVICE) {
+		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
+		cmd->req.itt = task->itt |
+			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+		bnx2i_setup_write_cmd_bd_info(task);
+	} else {
+		if (scsi_bufflen(sc))
+			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
+		cmd->req.itt = task->itt |
+			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+	}
+
+	cmd->req.num_bds = cmd->io_tbl.bd_valid;
+	if (!cmd->io_tbl.bd_valid) {
+		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
+		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+		cmd->req.num_bds = 1;
+	}
+
+	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
+	return 0;
+}
+
+/**
+ * bnx2i_session_create - create a new iscsi session
+ * @ep: pointer to iscsi endpoint returned by bnx2i_ep_connect
+ * @cmds_max: max commands supported
+ * @qdepth: scsi queue depth to support
+ * @initial_cmdsn: initial iscsi CMDSN to be used for this session
+ *
+ * Creates a new iSCSI session instance on given device.
+ */
+static struct iscsi_cls_session *
+bnx2i_session_create(struct iscsi_endpoint *ep,
+		     uint16_t cmds_max, uint16_t qdepth,
+		     uint32_t initial_cmdsn)
+{
+	struct Scsi_Host *shost;
+	struct iscsi_cls_session *cls_session;
+	struct bnx2i_hba *hba;
+	struct bnx2i_endpoint *bnx2i_ep;
+
+	if (!ep) {
+		printk(KERN_ERR "bnx2i: missing ep.\n");
+		return NULL;
+	}
+
+	bnx2i_ep = ep->dd_data;
+	shost = bnx2i_ep->hba->shost;
+	hba = iscsi_host_priv(shost);
+	if (bnx2i_adapter_ready(hba))
+		return NULL;
+
+	/*
+	 * user can override hw limit as long as it is within
+	 * the min/max.
+	 */
+	if (cmds_max > hba->max_sqes)
+		cmds_max = hba->max_sqes;
+	else if (cmds_max < BNX2I_SQ_WQES_MIN)
+		cmds_max = BNX2I_SQ_WQES_MIN;
+
+	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
+					  cmds_max, sizeof(struct bnx2i_cmd),
+					  initial_cmdsn, ISCSI_MAX_TARGET);
+	if (!cls_session)
+		return NULL;
+
+	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
+		goto session_teardown;
+	return cls_session;
+
+session_teardown:
+	iscsi_session_teardown(cls_session);
+	return NULL;
+}
+
+
+/**
+ * bnx2i_session_destroy - destroys iscsi session
+ * @cls_session: pointer to iscsi cls session
+ *
+ * Destroys previously created iSCSI session instance and releases
+ * all resources held by it
+ */
+static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+	struct iscsi_session *session = cls_session->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+
+	bnx2i_destroy_cmd_pool(hba, session);
+	iscsi_session_teardown(cls_session);
+}
+
+
+/**
+ * bnx2i_conn_create - create iscsi connection instance
+ * @cls_session: pointer to iscsi cls session
+ * @cid: iscsi cid as per rfc (not NX2's CID terminology)
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *
+bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_conn *bnx2i_conn;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_conn *conn;
+
+	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
+				    cid);
+	if (!cls_conn)
+		return NULL;
+	conn = cls_conn->dd_data;
+
+	bnx2i_conn = conn->dd_data;
+	bnx2i_conn->cls_conn = cls_conn;
+	bnx2i_conn->hba = hba;
+	/* 'ep' ptr will be assigned in bind() call */
+	bnx2i_conn->ep = NULL;
+	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
+
+	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
+		iscsi_conn_printk(KERN_ALERT, conn,
+				  "conn_new: login resc alloc failed!!\n");
+		goto free_conn;
+	}
+
+	return cls_conn;
+
+free_conn:
+	iscsi_conn_teardown(cls_conn);
+	return NULL;
+}
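The create/destroy pairs above thread several levels of private data together through dd_data; spelled out, the containment is (editor's summary of the code, not new definitions):

	/*
	 *   iscsi_cls_session->dd_data  ->  struct iscsi_session
	 *   iscsi_cls_conn->dd_data     ->  struct iscsi_conn
	 *   iscsi_conn->dd_data         ->  struct bnx2i_conn
	 *   iscsi_endpoint->dd_data     ->  struct bnx2i_endpoint
	 *   iscsi_task->dd_data         ->  struct bnx2i_cmd  (sized by the
	 *                                   cmd_task_size argument passed to
	 *                                   iscsi_session_setup above)
	 */

This is why nearly every entry point starts with one or two dd_data dereferences before any real work happens.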
+
+/**
+ * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
+ * @cls_session: pointer to iscsi cls session
+ * @cls_conn: pointer to iscsi cls conn
+ * @transport_fd: 64-bit EP handle
+ * @is_leading: leading connection on this session?
+ *
+ * Binds together the iSCSI session instance, iSCSI connection instance
+ * and the TCP connection.  This routine returns an error code if the
+ * TCP connection does not belong to the device the iSCSI sess/conn is
+ * bound to
+ */
+static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+			   struct iscsi_cls_conn *cls_conn,
+			   uint64_t transport_fd, int is_leading)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct iscsi_endpoint *ep;
+	int ret_code;
+
+	ep = iscsi_lookup_endpoint(transport_fd);
+	if (!ep)
+		return -EINVAL;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+		/* Peer disconnect via FIN or RST */
+		return -EINVAL;
+
+	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+		return -EINVAL;
+
+	if (bnx2i_ep->hba != hba) {
+		/* Error - TCP connection does not belong to this device */
+		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+				  "conn bind, ep=0x%p (%s) does not belong to hba (%s)\n",
+				  bnx2i_ep, bnx2i_ep->hba->netdev->name,
+				  hba->netdev->name);
+		return -EEXIST;
+	}
+
+	bnx2i_ep->conn = bnx2i_conn;
+	bnx2i_conn->ep = bnx2i_ep;
+	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
+	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
+	bnx2i_conn->is_bound = 1;
+
+	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
+						bnx2i_ep->ep_iscsi_cid);
+
+	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
+	 * driver needs to explicitly replenish RQ index during setup.
+	 */
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		bnx2i_put_rq_buf(bnx2i_conn, 0);
+
+	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+	return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ * @cls_conn: pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ * this connection
+ */
+static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	struct Scsi_Host *shost;
+	struct bnx2i_hba *hba;
+
+	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+	hba = iscsi_host_priv(shost);
+
+	bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+	iscsi_conn_teardown(cls_conn);
+}
+
+
+/**
+ * bnx2i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn: pointer to iscsi cls conn
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+				enum iscsi_param param, char *buf)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_PARAM_CONN_PORT:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, "%hu\n",
+				      bnx2i_conn->ep->cm_sk->dst_port);
+		break;
+	case ISCSI_PARAM_CONN_ADDRESS:
+		if (bnx2i_conn->ep)
+			len = sprintf(buf, NIPQUAD_FMT "\n",
+				      NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+		break;
+	default:
+		return iscsi_conn_get_param(cls_conn, param, buf);
+	}
+
+	return len;
+}
+
+/**
+ * bnx2i_host_get_param - returns host (adapter) related parameters
+ * @shost: scsi host pointer
+ * @param: parameter type identifier
+ * @buf: buffer pointer
+ */
+static int bnx2i_host_get_param(struct Scsi_Host *shost,
+				enum iscsi_host_param param, char *buf)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	int len = 0;
+
+	switch (param) {
+	case ISCSI_HOST_PARAM_HWADDRESS:
+		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
+		break;
+	case ISCSI_HOST_PARAM_NETDEV_NAME:
+		len = sprintf(buf, "%s\n", hba->netdev->name);
+		break;
+	default:
+		return iscsi_host_get_param(shost, param, buf);
+	}
+	return len;
+}
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ * @cls_conn: pointer to iscsi cls conn
+ *
+ * last call in FFP migration to handover iscsi conn to the driver
+ */
+static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+
+	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
+	bnx2i_update_iscsi_conn(conn);
+
+	/*
+	 * this should normally not sleep for a long time so it should
+	 * not disrupt the caller.
+	 */
+	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
+	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
+	add_timer(&bnx2i_conn->ep->ofld_timer);
+	/* update iSCSI context for this conn, wait for CNIC to complete */
+	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
+			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+
+	iscsi_conn_start(cls_conn);
+	return 0;
+}
+
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ * @cls_conn: pointer to iscsi cls conn
+ * @stats: pointer to iscsi statistic struct
+ */
+static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+				 struct iscsi_stats *stats)
+{
+	struct iscsi_conn *conn = cls_conn->dd_data;
+
+	stats->txdata_octets = conn->txdata_octets;
+	stats->rxdata_octets = conn->rxdata_octets;
+	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+	stats->dataout_pdus = conn->dataout_pdus_cnt;
+	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+	stats->datain_pdus = conn->datain_pdus_cnt;
+	stats->r2t_pdus = conn->r2t_pdus_cnt;
+	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+	stats->digest_err = 0;
+	stats->timeout_err = 0;
+	stats->custom_length = 1;
+	strcpy(stats->custom[0].desc, "eh_abort_cnt");
+	stats->custom[0].value = conn->eh_abort_cnt;
+}
+
+
+/**
+ * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices
+ * @dst_addr: target IP address
+ *
+ * check if route resolves to BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic = NULL;
+
+	bnx2i_reg_dev_all();
+
+	hba = get_adapter_list_head();
+	if (hba && hba->cnic)
+		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+	if (!cnic) {
+		printk(KERN_ALERT "bnx2i: no route, "
+				  "can't connect using cnic\n");
+		goto no_nx2_route;
+	}
+	hba = bnx2i_find_hba_for_cnic(cnic);
+	if (!hba)
+		goto no_nx2_route;
+
+	if (bnx2i_adapter_ready(hba)) {
+		printk(KERN_ALERT "bnx2i: check route, hba not ready\n");
+		goto no_nx2_route;
+	}
+	if (hba->netdev->mtu > hba->mtu_supported) {
+		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+		       hba->netdev->name, hba->netdev->mtu);
+		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+		       hba->mtu_supported);
+		goto no_nx2_route;
+	}
+	return hba;
+no_nx2_route:
+	return NULL;
+}
+
+
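bnx2i_check_route is only the fallback path: when iscsid names a host explicitly the driver trusts it, otherwise the destination address is routed to find an iSCSI-capable NX2 port. This paraphrases the adapter-selection logic of bnx2i_ep_connect below (it is not an addition to the patch):

	struct bnx2i_hba *hba;

	if (shost)
		hba = iscsi_host_priv(shost);		/* user pinned a host */
	else
		hba = bnx2i_check_route(dst_addr);	/* pick by IP route */
	if (!hba)
		return ERR_PTR(-ENOMEM);		/* no NX2 path to target */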
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ * @hba: pointer to adapter instance
+ * @ep: endpoint (transport identifier) structure
+ *
+ * destroys cm_sock structure and on chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+				struct bnx2i_endpoint *ep)
+{
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+		hba->cnic->cm_destroy(ep->cm_sk);
+
+	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+		ep->state = EP_STATE_DISCONN_COMPL;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+		printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
+				  " NW/PCIe trace, driver msgs to developers"
+				  " for analysis\n");
+		return 1;
+	}
+
+	ep->state = EP_STATE_CLEANUP_START;
+	init_timer(&ep->ofld_timer);
+	ep->ofld_timer.expires = 10*HZ + jiffies;
+	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	ep->ofld_timer.data = (unsigned long) ep;
+	add_timer(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_add(hba, ep);
+
+	/* destroy iSCSI context, wait for it to complete */
+	bnx2i_send_conn_destroy(hba, ep);
+	wait_event_interruptible(ep->ofld_wait,
+				 (ep->state != EP_STATE_CLEANUP_START));
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&ep->ofld_timer);
+
+	bnx2i_ep_destroy_list_del(hba, ep);
+
+	if (ep->state != EP_STATE_CLEANUP_CMPL)
+		/* should never happen */
+		printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+
+	return 0;
+}
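The tear-down above repeats a pattern used throughout this file for talking to the firmware: arm a watchdog timer, post the request, sleep until the completion path (or the timer) moves the endpoint state, then disarm. Distilled to its skeleton, with the request poster as a hypothetical stand-in (init_timer/add_timer is the timer API of this kernel era):

	init_timer(&ep->ofld_timer);
	ep->ofld_timer.expires = 10 * HZ + jiffies;
	ep->ofld_timer.function = bnx2i_ep_ofld_timer;	/* wakes ofld_wait on timeout */
	ep->ofld_timer.data = (unsigned long) ep;
	add_timer(&ep->ofld_timer);

	post_fw_request(ep);				/* hypothetical stand-in */
	wait_event_interruptible(ep->ofld_wait,		/* KCQ handler flips the state */
				 ep->state != EP_STATE_CLEANUP_START);
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&ep->ofld_timer);

The timer guarantees the sleeper always wakes even if the firmware never answers; the state check afterwards distinguishes success from timeout.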
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ * @shost: scsi host
+ * @dst_addr: target IP address
+ * @non_blocking: blocking or non-blocking call
+ *
+ * this routine initiates the TCP/IP connection by invoking Option-2 i/f
+ *	with l5_core and the CNIC.  This is a multi-step process of resolving
+ *	the route to the target, creating an iscsi connection context,
+ *	handshaking with the CNIC module to create/initialize the socket
+ *	struct and finally sending down an option-2 request to complete the
+ *	TCP 3-way handshake
+ */
+static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
+					       struct sockaddr *dst_addr,
+					       int non_blocking)
+{
+	u32 iscsi_cid = BNX2I_CID_RESERVED;
+	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+	struct sockaddr_in6 *desti6;
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_hba *hba;
+	struct cnic_dev *cnic;
+	struct cnic_sockaddr saddr;
+	struct iscsi_endpoint *ep;
+	int rc = 0;
+
+	if (shost)
+		/* driver is given scsi host to work with */
+		hba = iscsi_host_priv(shost);
+	else
+		/*
+		 * check if the given destination can be reached through
+		 * an iscsi-capable NetXtreme II device
+		 */
+		hba = bnx2i_check_route(dst_addr);
+	if (!hba) {
+		rc = -ENOMEM;
+		goto check_busy;
+	}
+
+	cnic = hba->cnic;
+	ep = bnx2i_alloc_ep(hba);
+	if (!ep) {
+		rc = -ENOMEM;
+		goto check_busy;
+	}
+	bnx2i_ep = ep->dd_data;
+
+	mutex_lock(&hba->net_dev_lock);
+	if (bnx2i_adapter_ready(hba)) {
+		rc = -EPERM;
+		goto net_if_down;
+	}
+
+	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->ep_iscsi_cid = (u16) -1;
+	bnx2i_ep->num_active_cmds = 0;
+	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
+	if (iscsi_cid == -1) {
+		printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+		rc = -ENOMEM;
+		goto iscsi_cid_err;
+	}
+	bnx2i_ep->hba_age = hba->age;
+
+	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
+	if (rc != 0) {
+		printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+		rc = -ENOMEM;
+		goto qp_resc_err;
+	}
+
+	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
+	bnx2i_ep->state = EP_STATE_OFLD_START;
+	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
+
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
+	bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
+
+	/* Wait for CNIC hardware to setup conn context and return 'cid' */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_OFLD_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+
+	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
+		rc = -ENOSPC;
+		goto conn_failed;
+	}
+
+	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
+			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
+	if (rc) {
+		rc = -EINVAL;
+		goto conn_failed;
+	}
+
+	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
+	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
+	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
+
+	memset(&saddr, 0, sizeof(saddr));
+	if (dst_addr->sa_family == AF_INET) {
+		desti = (struct sockaddr_in *) dst_addr;
+		saddr.remote.v4 = *desti;
+		saddr.local.v4.sin_family = desti->sin_family;
+	} else if (dst_addr->sa_family == AF_INET6) {
+		desti6 = (struct sockaddr_in6 *) dst_addr;
+		saddr.remote.v6 = *desti6;
+		saddr.local.v6.sin6_family = desti6->sin6_family;
+	}
+
+	bnx2i_ep->timestamp = jiffies;
+	bnx2i_ep->state = EP_STATE_CONNECT_START;
+	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		rc = -EINVAL;
+		goto conn_failed;
+	} else
+		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
+
+	if (rc)
+		goto release_ep;
+
+	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+		goto release_ep;
+	mutex_unlock(&hba->net_dev_lock);
+	return ep;
+
+release_ep:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		mutex_unlock(&hba->net_dev_lock);
+		return ERR_PTR(rc);
+	}
+conn_failed:
+net_if_down:
+iscsi_cid_err:
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+qp_resc_err:
+	bnx2i_free_ep(ep);
+	mutex_unlock(&hba->net_dev_lock);
+check_busy:
+	bnx2i_unreg_dev_all();
+	return ERR_PTR(rc);
+}
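From the transport's point of view, ep_connect/ep_poll/ep_disconnect form a small state machine that iscsid drives through the iscsi netlink interface. A hedged sketch of the calling convention, written as a hypothetical in-kernel caller:

	struct iscsi_endpoint *ep;

	ep = bnx2i_ep_connect(shost, dst_addr, 1);	/* non-blocking connect */
	if (IS_ERR(ep))
		return PTR_ERR(ep);

	switch (bnx2i_ep_poll(ep, 1000 /* ms */)) {
	case 1:				/* TCP handshake finished */
		break;
	case 0:				/* still connecting; poll again later */
		break;
	default:			/* failed; tear everything down */
		bnx2i_ep_disconnect(ep);
	}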
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishment
+ * @ep: TCP connection (endpoint) handle
+ * @timeout_ms: timeout value in milliseconds
+ *
+ * polls for TCP connect request to complete
+ */
+static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	int rc = 0;
+
+	bnx2i_ep = ep->dd_data;
+	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
+	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
+	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		return -1;
+	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
+		return 1;
+
+	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
+					      ((bnx2i_ep->state ==
+						EP_STATE_OFLD_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_FAILED) ||
+					       (bnx2i_ep->state ==
+						EP_STATE_CONNECT_COMPL)),
+					      msecs_to_jiffies(timeout_ms));
+	if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+		rc = -1;
+
+	if (rc > 0)
+		return 1;
+	else if (!rc)
+		return 0;	/* timeout */
+	else
+		return rc;
+}
+
+
+/**
+ * bnx2i_ep_tcp_conn_active - check EP state transition
+ * @bnx2i_ep: endpoint pointer
+ *
+ * check if underlying TCP connection is active
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
+{
+	int ret;
+	int cnic_dev_10g = 0;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+		cnic_dev_10g = 1;
+
+	switch (bnx2i_ep->state) {
+	case EP_STATE_CONNECT_START:
+	case EP_STATE_CLEANUP_FAILED:
+	case EP_STATE_OFLD_FAILED:
+	case EP_STATE_DISCONN_TIMEDOUT:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_COMPL:
+	case EP_STATE_ULP_UPDATE_START:
+	case EP_STATE_ULP_UPDATE_COMPL:
+	case EP_STATE_TCP_FIN_RCVD:
+	case EP_STATE_ULP_UPDATE_FAILED:
+		ret = 1;
+		break;
+	case EP_STATE_TCP_RST_RCVD:
+		ret = 0;
+		break;
+	case EP_STATE_CONNECT_FAILED:
+		if (cnic_dev_10g)
+			ret = 1;
+		else
+			ret = 0;
+		break;
+	default:
+		ret = 0;
+	}
+
+	return ret;
+}
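bnx2i_ep_poll folds the kernel's three-way wait result into the transport's poll convention; for reference (this is the standard semantics of the macro, not new driver code):

	long rc = wait_event_interruptible_timeout(wq, condition, timeout);
	/*
	 * rc > 0   condition became true; value is the remaining jiffies
	 * rc == 0  timed out with the condition still false
	 * rc < 0   interrupted by a signal (-ERESTARTSYS)
	 *
	 * ep_poll maps these onto 1 (connected), 0 (keep polling) and the
	 * negative error, with an OFLD_FAILED endpoint forced to -1.
	 */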
+
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ * @ep: TCP connection (endpoint) handle
+ *
+ * executes TCP connection teardown process
+ */
+static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+	struct bnx2i_endpoint *bnx2i_ep;
+	struct bnx2i_conn *bnx2i_conn = NULL;
+	struct iscsi_session *session = NULL;
+	struct iscsi_conn *conn;
+	struct cnic_dev *cnic;
+	struct bnx2i_hba *hba;
+
+	bnx2i_ep = ep->dd_data;
+
+	/* driver should not attempt connection cleanup until TCP_CONNECT
+	 * completes either successfully or fails; allow up to 12 seconds
+	 * for it to settle
+	 */
+	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
+	       !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
+		msleep(250);
+
+	if (bnx2i_ep->conn) {
+		bnx2i_conn = bnx2i_ep->conn;
+		conn = bnx2i_conn->cls_conn->dd_data;
+		session = conn->session;
+
+		spin_lock_bh(&session->lock);
+		bnx2i_conn->is_bound = 0;
+		spin_unlock_bh(&session->lock);
+	}
+
+	hba = bnx2i_ep->hba;
+	if (bnx2i_ep->state == EP_STATE_IDLE)
+		goto return_bnx2i_ep;
+	cnic = hba->cnic;
+
+	mutex_lock(&hba->net_dev_lock);
+
+	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+		goto free_resc;
+	if (bnx2i_ep->hba_age != hba->age)
+		goto free_resc;
+
+	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
+		goto destroy_conn;
+
+	bnx2i_ep->state = EP_STATE_DISCONN_START;
+
+	init_timer(&bnx2i_ep->ofld_timer);
+	bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
+	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+	add_timer(&bnx2i_ep->ofld_timer);
+
+	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+		int close = 0;
+
+		if (session) {
+			spin_lock_bh(&session->lock);
+			if (session->state == ISCSI_STATE_LOGGING_OUT)
+				close = 1;
+			spin_unlock_bh(&session->lock);
+		}
+		if (close)
+			cnic->cm_close(bnx2i_ep->cm_sk);
+		else
+			cnic->cm_abort(bnx2i_ep->cm_sk);
+	} else
+		goto free_resc;
+
+	/* wait for option-2 conn teardown */
+	wait_event_interruptible(bnx2i_ep->ofld_wait,
+				 bnx2i_ep->state != EP_STATE_DISCONN_START);
+
+	if (signal_pending(current))
+		flush_signals(current);
+	del_timer_sync(&bnx2i_ep->ofld_timer);
+
+destroy_conn:
+	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+		mutex_unlock(&hba->net_dev_lock);
+		return;
+	}
+free_resc:
+	mutex_unlock(&hba->net_dev_lock);
+	bnx2i_free_qp_resc(hba, bnx2i_ep);
+return_bnx2i_ep:
+	if (bnx2i_conn)
+		bnx2i_conn->ep = NULL;
+
+	bnx2i_free_ep(ep);
+
+	if (!hba->ofld_conns_active)
+		bnx2i_unreg_dev_all();
+}
+
+
+/**
+ * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
+ * @shost: scsi host pointer
+ * @params: pointer to buffer containing iscsi path message
+ */
+static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
+{
+	struct bnx2i_hba *hba = iscsi_host_priv(shost);
+	char *buf = (char *) params;
+	u16 len = sizeof(*params);
+
+	/* handled by cnic driver */
+	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
+				     len);
+
+	return 0;
+}
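The two templates that follow are what the module init path (in bnx2i_init.c, outside this hunk) hands to the SCSI midlayer and the iSCSI transport class. The hookup amounts to roughly this sketch; the real init does more, e.g. cnic registration:

	static int __init bnx2i_register_sketch(void)
	{
		struct scsi_transport_template *tt;

		tt = iscsi_register_transport(&bnx2i_iscsi_transport);
		if (!tt)
			return -ENOMEM;
		bnx2i_scsi_xport_template = tt;	/* consumed by bnx2i_alloc_hba() */
		return 0;
	}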
+
+
+/*
+ * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
+ * used while registering with the scsi host and iSCSI transport module.
+ */
+static struct scsi_host_template bnx2i_host_template = {
+	.module			= THIS_MODULE,
+	.name			= "Broadcom Offload iSCSI Initiator",
+	.proc_name		= "bnx2i",
+	.queuecommand		= iscsi_queuecommand,
+	.eh_abort_handler	= iscsi_eh_abort,
+	.eh_device_reset_handler = iscsi_eh_device_reset,
+	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.can_queue		= 1024,
+	.max_sectors		= 127,
+	.cmd_per_lun		= 32,
+	.this_id		= -1,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
+	.shost_attrs		= bnx2i_dev_attributes,
+};
+
+struct iscsi_transport bnx2i_iscsi_transport = {
+	.owner			= THIS_MODULE,
+	.name			= "bnx2i",
+	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
+				  CAP_MULTI_R2T | CAP_DATADGST |
+				  CAP_DATA_PATH_OFFLOAD,
+	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
+				  ISCSI_MAX_XMIT_DLENGTH |
+				  ISCSI_HDRDGST_EN |
+				  ISCSI_DATADGST_EN |
+				  ISCSI_INITIAL_R2T_EN |
+				  ISCSI_MAX_R2T |
+				  ISCSI_IMM_DATA_EN |
+				  ISCSI_FIRST_BURST |
+				  ISCSI_MAX_BURST |
+				  ISCSI_PDU_INORDER_EN |
+				  ISCSI_DATASEQ_INORDER_EN |
+				  ISCSI_ERL |
+				  ISCSI_CONN_PORT |
+				  ISCSI_CONN_ADDRESS |
+				  ISCSI_EXP_STATSN |
+				  ISCSI_PERSISTENT_PORT |
+				  ISCSI_PERSISTENT_ADDRESS |
+				  ISCSI_TARGET_NAME | ISCSI_TPGT |
+				  ISCSI_USERNAME | ISCSI_PASSWORD |
+				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+				  ISCSI_LU_RESET_TMO |
+				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
+				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+	.host_param_mask	= ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
+	.create_session		= bnx2i_session_create,
+	.destroy_session	= bnx2i_session_destroy,
+	.create_conn		= bnx2i_conn_create,
+	.bind_conn		= bnx2i_conn_bind,
+	.destroy_conn		= bnx2i_conn_destroy,
+	.set_param		= iscsi_set_param,
+	.get_conn_param		= bnx2i_conn_get_param,
+	.get_session_param	= iscsi_session_get_param,
+	.get_host_param		= bnx2i_host_get_param,
+	.start_conn		= bnx2i_conn_start,
+	.stop_conn		= iscsi_conn_stop,
+	.send_pdu		= iscsi_conn_send_pdu,
+	.xmit_task		= bnx2i_task_xmit,
+	.get_stats		= bnx2i_conn_get_stats,
+	/* TCP connect - disconnect - option-2 interface calls */
+	.ep_connect		= bnx2i_ep_connect,
+	.ep_poll		= bnx2i_ep_poll,
+	.ep_disconnect		= bnx2i_ep_disconnect,
+	.set_path		= bnx2i_nl_set_path,
+	/* Error recovery timeout call */
+	.session_recovery_timedout = iscsi_session_recovery_timedout,
+	.cleanup_task		= bnx2i_cleanup_task,
+};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644
index 000000000000..96426b751eb2
--- /dev/null
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -0,0 +1,142 @@
+/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2004 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
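The file below defines two host tunables. They reach userspace because bnx2i_host_template's .shost_attrs points at the NULL-terminated array built at the bottom of this file, and the midlayer instantiates one sysfs node per entry under /sys/class/scsi_host/hostN/ when iscsi_host_add() runs; while no connections are offloaded they can be changed with a plain write such as echoing 0x200 into sq_size. The wiring pattern in miniature (names shortened, show_fn/store_fn hypothetical):

	static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR, show_fn, store_fn);

	static struct device_attribute *attrs_sketch[] = {
		&dev_attr_sq_size,
		NULL,			/* the midlayer walks until NULL */
	};
	/* ... and in the host template:  .shost_attrs = attrs_sketch, */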
+
+/**
+ * bnx2i_dev_to_hba - maps dev pointer to adapter struct
+ * @dev: device pointer
+ *
+ * Map device to hba structure
+ */
+static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	return iscsi_host_priv(shost);
+}
+
+
+/**
+ * bnx2i_show_sq_info - returns the currently configured send queue (SQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current SQ size parameter
+ *
+ * Returns current SQ size parameter, this parameter determines the number
+ * of outstanding iSCSI commands supported on a connection
+ */
+static ssize_t bnx2i_show_sq_info(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+
+/**
+ * bnx2i_set_sq_info - update send queue (SQ) size parameter
+ * @dev: device pointer
+ * @buf: buffer containing the new SQ size parameter
+ * @count: parameter buffer size
+ *
+ * Interface for user to change shared queue size allocated for each conn
+ * Must be within SQ limits and a power of 2.  For the latter this is needed
+ * because of how libiscsi preallocates tasks.
+ */
+static ssize_t bnx2i_set_sq_info(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+	u32 val;
+	int max_sq_size;
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+		max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
+	else
+		max_sq_size = BNX2I_570X_SQ_WQES_MAX;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
+		    (is_power_of_2(val)))
+			hba->max_sqes = val;
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
+	return -EBUSY;	/* a 0 return would make userspace retry forever */
+}
+
+
+/**
+ * bnx2i_show_ccell_info - returns command cell (HQ) size
+ * @dev: device pointer
+ * @buf: buffer to return current ccell (HQ) size parameter
+ *
+ * returns per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_show_ccell_info(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+
+/**
+ * bnx2i_set_ccell_info - sets command cell (HQ) size
+ * @dev: device pointer
+ * @buf: buffer containing the new ccell (HQ) size parameter
+ * @count: parameter buffer size
+ *
+ * updates per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_set_ccell_info(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	u32 val;
+	struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+	if (hba->ofld_conns_active)
+		goto skip_config;
+
+	if (sscanf(buf, " 0x%x ", &val) > 0) {
+		if ((val >= BNX2I_CCELLS_MIN) &&
+		    (val <= BNX2I_CCELLS_MAX)) {
+			hba->num_ccell = val;
+		}
+	}
+
+	return count;
+
+skip_config:
+	printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
+	return -EBUSY;	/* a 0 return would make userspace retry forever */
+}
+
+
+static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
+		   bnx2i_show_sq_info, bnx2i_set_sq_info);
+static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
+		   bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+
+struct device_attribute *bnx2i_dev_attributes[] = {
+	&dev_attr_sq_size,
+	&dev_attr_num_ccell,
+	NULL
+};
diff --git a/drivers/scsi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgb3i/cxgb3i.h index 59b0958d2d11..e3133b58e594 100644 --- 
a/drivers/scsi/cxgb3i/cxgb3i.h +++ b/drivers/scsi/cxgb3i/cxgb3i.h @@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *); void cxgb3i_adapter_open(struct t3cdev *); void cxgb3i_adapter_close(struct t3cdev *); -struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *); struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, struct net_device *); void cxgb3i_hba_host_remove(struct cxgb3i_hba *); diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 9212400b9b13..74369a3f963b 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c @@ -13,6 +13,7 @@ #include <linux/inet.h> #include <linux/crypto.h> +#include <net/dst.h> #include <net/tcp.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev) * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device * @ndev: net device pointer */ -struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) +static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) { struct cxgb3i_adapter *snic; int i; @@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba) /** * cxgb3i_ep_connect - establish TCP connection to target portal + * @shost: scsi host to use * @dst_addr: target IP address * @non_blocking: blocking or non-blocking call * * Initiates a TCP/IP connection to the dst_addr */ -static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, +static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, int non_blocking) { struct iscsi_endpoint *ep; struct cxgb3i_endpoint *cep; - struct cxgb3i_hba *hba; + struct cxgb3i_hba *hba = NULL; struct s3_conn *c3cn = NULL; int err = 0; + if (shost) + hba = iscsi_host_priv(shost); + + cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba); + c3cn = cxgb3i_c3cn_create(); if (!c3cn) { cxgb3i_log_info("ep connect OOM.\n"); @@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, goto release_conn; } - err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr); + err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn, + (struct sockaddr_in *)dst_addr); if (err < 0) { cxgb3i_log_info("ep connect failed.\n"); goto release_conn; } + hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); if (!hba) { err = -ENOSPC; cxgb3i_log_info("NOT going through cxgbi device.\n"); goto release_conn; } + + if (shost && hba != iscsi_host_priv(shost)) { + err = -ENOSPC; + cxgb3i_log_info("Could not connect through request host%u\n", + shost->host_no); + goto release_conn; + } + if (c3cn_is_closing(c3cn)) { + err = -ENOSPC; + cxgb3i_log_info("ep connect unable to connect.\n"); diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c index e11c9c180f39..c1d5be4adf9c 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c @@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev, return NULL; } -static struct rtable *find_route(__be32 saddr, __be32 daddr, +static struct rtable *find_route(struct net_device *dev, + __be32 saddr, __be32 daddr, __be16 sport, __be16 dport) { struct rtable *rt; struct flowi fl = { - .oif = 0, + .oif = dev ? dev->ifindex : 0, .nl_u = { .ip4_u = { .daddr = daddr, @@ -1573,36 +1574,40 @@ out_err: * * return 0 if active open request is sent, < 0 otherwise. 
*/ -int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin) +int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, + struct sockaddr_in *usin) { struct rtable *rt; - struct net_device *dev; struct cxgb3i_sdev_data *cdata; struct t3cdev *cdev; __be32 sipv4; int err; + c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); + if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; c3cn->daddr.sin_port = usin->sin_port; c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; - rt = find_route(c3cn->saddr.sin_addr.s_addr, + rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, c3cn->daddr.sin_addr.s_addr, c3cn->saddr.sin_port, c3cn->daddr.sin_port); if (rt == NULL) { - c3cn_conn_debug("NO route to 0x%x, port %u.\n", + c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n", c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port)); + ntohs(c3cn->daddr.sin_port), + dev ? dev->name : "any"); return -ENETUNREACH; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { - c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n", + c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n", c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port)); + ntohs(c3cn->daddr.sin_port), + dev ? dev->name : "any"); ip_rt_put(rt); return -ENETUNREACH; } diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.h b/drivers/scsi/cxgb3i/cxgb3i_offload.h index ebfca960c0a9..6a1d86b1fafe 100644 --- a/drivers/scsi/cxgb3i/cxgb3i_offload.h +++ b/drivers/scsi/cxgb3i/cxgb3i_offload.h @@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *); void cxgb3i_sdev_remove(struct t3cdev *); struct s3_conn *cxgb3i_c3cn_create(void); -int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *); +int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *, + struct sockaddr_in *); void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); void cxgb3i_c3cn_release(struct s3_conn *); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 43b8c51e98d0..fd0544f7da81 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev, struct rdac_dh_data *h = get_rdac_data(sdev); switch (sense_hdr->sense_key) { case NOT_READY: + if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) + /* LUN Not Ready - Logical Unit Not Ready and is in + * the process of becoming ready + * Just retry. + */ + return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) /* LUN Not Ready - Storage firmware incompatible * Manual code synchronisation required. 
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 03e1926f40b5..e606b4829d44 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2"); /* fcoe host list */ LIST_HEAD(fcoe_hostlist); DEFINE_RWLOCK(fcoe_hostlist_lock); -DEFINE_TIMER(fcoe_timer, NULL, 0, 0); DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); /* Function Prototypes */ @@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); static int fcoe_hostlist_add(const struct fc_lport *); static int fcoe_hostlist_remove(const struct fc_lport *); -static int fcoe_check_wait_queue(struct fc_lport *); +static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); static int fcoe_device_notification(struct notifier_block *, ulong, void *); static void fcoe_dev_setup(void); static void fcoe_dev_cleanup(void); @@ -146,6 +145,7 @@ static int fcoe_lport_config(struct fc_lport *lp) lp->link_up = 0; lp->qfull = 0; lp->max_retry_count = 3; + lp->max_rport_retry_count = 3; lp->e_d_tov = 2 * 1000; /* FC-FS default */ lp->r_a_tov = 2 * 2 * 1000; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | @@ -167,6 +167,18 @@ static int fcoe_lport_config(struct fc_lport *lp) } /** + * fcoe_queue_timer() - fcoe queue timer + * @lp: the fc_lport pointer + * + * Calls fcoe_check_wait_queue on timeout + * + */ +static void fcoe_queue_timer(ulong lp) +{ + fcoe_check_wait_queue((struct fc_lport *)lp, NULL); +} + +/** * fcoe_netdev_config() - Set up netdev for SW FCoE * @lp : ptr to the fc_lport * @netdev : ptr to the associated netdevice struct @@ -236,6 +248,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) } skb_queue_head_init(&fc->fcoe_pending_queue); fc->fcoe_pending_queue_active = 0; + setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp); /* setup Source Mac Address */ memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr, @@ -386,6 +399,9 @@ static int fcoe_if_destroy(struct net_device *netdev) /* Free existing skbs */ fcoe_clean_pending_queue(lp); + /* Stop the timer */ + del_timer_sync(&fc->timer); + /* Free memory used by statistical counters */ fc_lport_free_stats(lp); @@ -988,7 +1004,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp) */ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) { - int wlen, rc = 0; + int wlen; u32 crc; struct ethhdr *eh; struct fcoe_crc_eof *cp; @@ -1021,8 +1037,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) sof = fr_sof(fp); eof = fr_eof(fp); - elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? 
- sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr); + elen = sizeof(struct ethhdr); hlen = sizeof(struct fcoe_hdr); tlen = sizeof(struct fcoe_crc_eof); wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; @@ -1107,18 +1122,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) /* send down to lld */ fr_dev(fp) = lp; if (fc->fcoe_pending_queue.qlen) - rc = fcoe_check_wait_queue(lp); - - if (rc == 0) - rc = fcoe_start_io(skb); - - if (rc) { - spin_lock_bh(&fc->fcoe_pending_queue.lock); - __skb_queue_tail(&fc->fcoe_pending_queue, skb); - spin_unlock_bh(&fc->fcoe_pending_queue.lock); - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - lp->qfull = 1; - } + fcoe_check_wait_queue(lp, skb); + else if (fcoe_start_io(skb)) + fcoe_check_wait_queue(lp, skb); return 0; } @@ -1268,32 +1274,6 @@ int fcoe_percpu_receive_thread(void *arg) } /** - * fcoe_watchdog() - fcoe timer callback - * @vp: - * - * This checks the pending queue length for fcoe and set lport qfull - * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the - * fcoe_hostlist. - * - * Returns: 0 for success - */ -void fcoe_watchdog(ulong vp) -{ - struct fcoe_softc *fc; - - read_lock(&fcoe_hostlist_lock); - list_for_each_entry(fc, &fcoe_hostlist, list) { - if (fc->ctlr.lp) - fcoe_check_wait_queue(fc->ctlr.lp); - } - read_unlock(&fcoe_hostlist_lock); - - fcoe_timer.expires = jiffies + (1 * HZ); - add_timer(&fcoe_timer); -} - - -/** * fcoe_check_wait_queue() - attempt to clear the transmit backlog * @lp: the fc_lport * @@ -1305,16 +1285,17 @@ void fcoe_watchdog(ulong vp) * The wait_queue is used when the skb transmit fails. skb will go * in the wait_queue which will be emptied by the timer function or * by the next skb transmit. - * - * Returns: 0 for success */ -static int fcoe_check_wait_queue(struct fc_lport *lp) +static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) { struct fcoe_softc *fc = lport_priv(lp); - struct sk_buff *skb; - int rc = -1; + int rc; spin_lock_bh(&fc->fcoe_pending_queue.lock); + + if (skb) + __skb_queue_tail(&fc->fcoe_pending_queue, skb); + if (fc->fcoe_pending_queue_active) goto out; fc->fcoe_pending_queue_active = 1; @@ -1340,23 +1321,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp) if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) lp->qfull = 0; + if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer)) + mod_timer(&fc->timer, jiffies + 2); fc->fcoe_pending_queue_active = 0; - rc = fc->fcoe_pending_queue.qlen; out: + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) + lp->qfull = 1; spin_unlock_bh(&fc->fcoe_pending_queue.lock); - return rc; + return; } /** * fcoe_dev_setup() - setup link change notification interface */ -static void fcoe_dev_setup() +static void fcoe_dev_setup(void) { register_netdevice_notifier(&fcoe_notifier); } /** - * fcoe_dev_setup() - cleanup link change notification interface + * fcoe_dev_cleanup() - cleanup link change notification interface */ static void fcoe_dev_cleanup(void) { @@ -1815,10 +1799,6 @@ static int __init fcoe_init(void) /* Setup link change notification */ fcoe_dev_setup(); - setup_timer(&fcoe_timer, fcoe_watchdog, 0); - - mod_timer(&fcoe_timer, jiffies + (10 * HZ)); - fcoe_if_init(); return 0; @@ -1844,9 +1824,6 @@ static void __exit fcoe_exit(void) fcoe_dev_cleanup(); - /* Stop the timer */ - del_timer_sync(&fcoe_timer); - /* releases the associated fcoe hosts */ list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) fcoe_if_destroy(fc->real_dev); diff --git 
a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index 917aae886897..a1eb8c1988b0 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -61,6 +61,7 @@ struct fcoe_softc { struct packet_type fip_packet_type; struct sk_buff_head fcoe_pending_queue; u8 fcoe_pending_queue_active; + struct timer_list timer; /* queue timer */ struct fcoe_ctlr ctlr; }; diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index 62ba0f39c6bd..929411880e4b 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -213,7 +213,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) sol->desc.size.fd_size = htons(fcoe_size); skb_put(skb, sizeof(*sol)); - skb->protocol = htons(ETH_P_802_3); + skb->protocol = htons(ETH_P_FIP); skb_reset_mac_header(skb); skb_reset_network_header(skb); fip->send(fip, skb); @@ -365,7 +365,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) } skb_put(skb, len); - skb->protocol = htons(ETH_P_802_3); + skb->protocol = htons(ETH_P_FIP); skb_reset_mac_header(skb); skb_reset_network_header(skb); fip->send(fip, skb); @@ -424,7 +424,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, if (dtype != ELS_FLOGI) memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); - skb->protocol = htons(ETH_P_802_3); + skb->protocol = htons(ETH_P_FIP); skb_reset_mac_header(skb); skb_reset_network_header(skb); return 0; @@ -447,14 +447,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) u16 old_xid; u8 op; - if (fip->state == FIP_ST_NON_FIP) - return 0; - fh = (struct fc_frame_header *)skb->data; op = *(u8 *)(fh + 1); - switch (op) { - case ELS_FLOGI: + if (op == ELS_FLOGI) { old_xid = fip->flogi_oxid; fip->flogi_oxid = ntohs(fh->fh_ox_id); if (fip->state == FIP_ST_AUTO) { @@ -466,6 +462,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) fip->map_dest = 1; return 0; } + if (fip->state == FIP_ST_NON_FIP) + fip->map_dest = 1; + } + + if (fip->state == FIP_ST_NON_FIP) + return 0; + + switch (op) { + case ELS_FLOGI: op = FIP_DT_FLOGI; break; case ELS_FDISC: diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 32ef6b87d895..a84072865fc2 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -680,6 +680,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev, } lp->max_retry_count = fnic->config.flogi_retries; + lp->max_rport_retry_count = fnic->config.plogi_retries; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_CONF_COMPL); if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index 59349a316e13..1258da34fbc2 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c @@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, struct Scsi_Host *host, gdth_ha_str *ha) { int size = 0,len = 0; + int hlen; off_t begin = 0,pos = 0; int id, i, j, k, sec, flag; int no_mdrv = 0, drv_no, is_mirr; @@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, if (reserve_list[0] == 0xff) strcpy(hrec, "--"); else { - sprintf(hrec, "%d", reserve_list[0]); + hlen = sprintf(hrec, "%d", reserve_list[0]); for (i = 1; i < MAX_RES_ARGS; i++) { if (reserve_list[i] == 0xff) break; - sprintf(hrec,"%s,%d", hrec, reserve_list[i]); + hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]); } } size = sprintf(buffer+len, diff --git 
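The gdth_proc change above fixes a classic bug: sprintf(hrec, "%s,%d", hrec, ...) passes the destination buffer as one of its own sources, which is undefined behavior, and the write was unbounded as well. The replacement keeps a running length and appends with snprintf (161 presumably matching the size of hrec). A standalone illustration of the accumulated-offset pattern, with arbitrary example values:

#include <stdio.h>

int main(void)
{
	int reserve_list[] = { 3, 7, 12 };
	char buf[161];
	int len, i;

	/* first element without a separator, exactly as the driver does */
	len = snprintf(buf, sizeof(buf), "%d", reserve_list[0]);
	for (i = 1; i < 3; i++)
		len += snprintf(buf + len, sizeof(buf) - len, ",%d",
				reserve_list[i]);

	printf("%s\n", buf);		/* prints 3,7,12 */
	return 0;
}

One caveat with this idiom: snprintf returns the length the full string would have had, so with untrusted input the offset should be clamped before reuse; the driver's small fixed-size reserve list makes that unnecessary here.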
a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index ea4abee7a2a9..b4b805e8d7db 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -110,7 +110,7 @@ static const struct { { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, - { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, @@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *); static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); static void ibmvfc_tgt_query_target(struct ibmvfc_target *); +static void ibmvfc_npiv_logout(struct ibmvfc_host *); static const char *unknown_error = "unknown error"; @@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd) int fc_rsp_len = rsp->fcp_rsp_len; if ((rsp->flags & FCP_RSP_LEN_VALID) && - ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || + ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || rsp->data.info.rsp_code)) return DID_ERROR << 16; @@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, case IBMVFC_TGT_ACTION_DEL_RPORT: break; default: + if (action == IBMVFC_TGT_ACTION_DEL_RPORT) + tgt->add_rport = 0; tgt->action = action; break; } @@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) vhost->action = action; break; + case IBMVFC_HOST_ACTION_LOGO_WAIT: + if (vhost->action == IBMVFC_HOST_ACTION_LOGO) + vhost->action = action; + break; case IBMVFC_HOST_ACTION_INIT_WAIT: if (vhost->action == IBMVFC_HOST_ACTION_INIT) vhost->action = action; @@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, switch (vhost->action) { case IBMVFC_HOST_ACTION_INIT_WAIT: case IBMVFC_HOST_ACTION_NONE: - case IBMVFC_HOST_ACTION_TGT_ADD: + case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: vhost->action = action; break; default: @@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) vhost->action = action; break; + case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_QUERY_TGTS: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: - case IBMVFC_HOST_ACTION_TGT_ADD: case IBMVFC_HOST_ACTION_NONE: default: vhost->action = action; @@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) } list_for_each_entry(tgt, &vhost->targets, queue) - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL); free_page((unsigned long)crq->msgs); } @@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host 
*vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); /* Clean out the queue */ @@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code) } /** - * __ibmvfc_reset_host - Reset the connection to the server (no locking) + * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ * @vhost: struct ibmvfc host to reset **/ -static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) +static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) { int rc; @@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) } /** - * ibmvfc_reset_host - Reset the connection to the server + * __ibmvfc_reset_host - Reset the connection to the server (no locking) * @vhost: struct ibmvfc host to reset **/ +static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) +{ + if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT && + !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO); + vhost->job_step = ibmvfc_npiv_logout; + wake_up(&vhost->work_wait_q); + } else + ibmvfc_hard_reset_host(vhost); +} + +/** + * ibmvfc_reset_host - Reset the connection to the server + * @vhost: ibmvfc host struct + **/ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) { unsigned long flags; @@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) * ibmvfc_retry_host_init - Retry host initialization if allowed * @vhost: ibmvfc host struct * + * Returns: 1 if init will be retried / 0 if not + * **/ -static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) +static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost) { + int retry = 0; + if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { vhost->delay_init = 1; if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { @@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) __ibmvfc_reset_host(vhost); - else + else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); + retry = 1; + } } wake_up(&vhost->work_wait_q); + return retry; } /** @@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) login_info->partition_num = vhost->partition_number; login_info->vfc_frame_version = 1; login_info->fcp_version = 3; + login_info->flags = IBMVFC_FLUSH_ON_HALT; if (vhost->client_migrated) - login_info->flags = IBMVFC_CLIENT_MIGRATED; + login_info->flags |= IBMVFC_CLIENT_MIGRATED; login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; login_info->capabilities = IBMVFC_CAN_MIGRATE; @@ -1452,6 +1485,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) } /** + * ibmvfc_relogin - Log back into the specified device + * @sdev: scsi device struct + * + **/ +static void ibmvfc_relogin(struct scsi_device *sdev) +{ + struct ibmvfc_host *vhost = shost_priv(sdev->host); + struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); + struct ibmvfc_target *tgt; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (rport == tgt->rport) { + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + break; + } + } + + ibmvfc_reinit_host(vhost); +} + +/** * ibmvfc_scsi_done - Handle responses from commands * @evt: ibmvfc event to be handled * @@ -1483,7 +1537,7 @@ static void 
ibmvfc_scsi_done(struct ibmvfc_event *evt) if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) - ibmvfc_reinit_host(evt->vhost); + ibmvfc_relogin(cmnd->device); if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) cmnd->result = (DID_ERROR << 16); @@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost) { const char *desc = ibmvfc_get_ae_desc(crq->event); + struct ibmvfc_target *tgt; ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); switch (crq->event) { - case IBMVFC_AE_LINK_UP: case IBMVFC_AE_RESUME: + switch (crq->link_state) { + case IBMVFC_AE_LS_LINK_DOWN: + ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + break; + case IBMVFC_AE_LS_LINK_DEAD: + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + break; + case IBMVFC_AE_LS_LINK_UP: + case IBMVFC_AE_LS_LINK_BOUNCED: + default: + vhost->events_to_log |= IBMVFC_AE_LINKUP; + vhost->delay_init = 1; + __ibmvfc_reset_host(vhost); + break; + }; + + break; + case IBMVFC_AE_LINK_UP: vhost->events_to_log |= IBMVFC_AE_LINKUP; vhost->delay_init = 1; __ibmvfc_reset_host(vhost); @@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, case IBMVFC_AE_SCN_NPORT: case IBMVFC_AE_SCN_GROUP: vhost->events_to_log |= IBMVFC_AE_RSCN; + ibmvfc_reinit_host(vhost); + break; case IBMVFC_AE_ELS_LOGO: case IBMVFC_AE_ELS_PRLO: case IBMVFC_AE_ELS_PLOGI: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (!crq->scsi_id && !crq->wwpn && !crq->node_name) + break; + if (crq->scsi_id && tgt->scsi_id != crq->scsi_id) + continue; + if (crq->wwpn && tgt->ids.port_name != crq->wwpn) + continue; + if (crq->node_name && tgt->ids.node_name != crq->node_name) + continue; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + } + ibmvfc_reinit_host(vhost); break; case IBMVFC_AE_LINK_DOWN: @@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) return; case IBMVFC_CRQ_XPORT_EVENT: vhost->state = IBMVFC_NO_CRQ; + vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); if (crq->format == IBMVFC_PARTITION_MIGRATED) { /* We need to re-setup the interpartition connection */ @@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) done = 1; } - if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) + if (vhost->scan_complete) done = 1; spin_unlock_irqrestore(shost->host_lock, flags); return done; @@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev, vhost->login_buf->resp.partition_name); } -static struct device_attribute ibmvfc_host_partition_name = { - .attr = { - .name = "partition_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_partition_name, -}; - static ssize_t ibmvfc_show_host_device_name(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev, vhost->login_buf->resp.device_name); } -static struct device_attribute ibmvfc_host_device_name = { - .attr = { - .name = "device_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_device_name, -}; - static ssize_t ibmvfc_show_host_loc_code(struct device *dev, struct device_attribute 
*attr, char *buf) { @@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev, vhost->login_buf->resp.port_loc_code); } -static struct device_attribute ibmvfc_host_loc_code = { - .attr = { - .name = "port_loc_code", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_loc_code, -}; - static ssize_t ibmvfc_show_host_drc_name(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev, vhost->login_buf->resp.drc_name); } -static struct device_attribute ibmvfc_host_drc_name = { - .attr = { - .name = "drc_name", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_drc_name, -}; - static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); } -static struct device_attribute ibmvfc_host_npiv_version = { - .attr = { - .name = "npiv_version", - .mode = S_IRUGO, - }, - .show = ibmvfc_show_host_npiv_version, -}; +static ssize_t ibmvfc_show_host_capabilities(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvfc_host *vhost = shost_priv(shost); + return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities); +} /** * ibmvfc_show_log_level - Show the adapter's error logging level @@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev, return strlen(buf); } -static struct device_attribute ibmvfc_log_level_attr = { - .attr = { - .name = "log_level", - .mode = S_IRUGO | S_IWUSR, - }, - .show = ibmvfc_show_log_level, - .store = ibmvfc_store_log_level -}; +static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL); +static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); +static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); +static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL); +static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); +static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL); +static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, + ibmvfc_show_log_level, ibmvfc_store_log_level); #ifdef CONFIG_SCSI_IBMVFC_TRACE /** @@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = { #endif static struct device_attribute *ibmvfc_attrs[] = { - &ibmvfc_host_partition_name, - &ibmvfc_host_device_name, - &ibmvfc_host_loc_code, - &ibmvfc_host_drc_name, - &ibmvfc_host_npiv_version, - &ibmvfc_log_level_attr, + &dev_attr_partition_name, + &dev_attr_device_name, + &dev_attr_port_loc_code, + &dev_attr_drc_name, + &dev_attr_npiv_version, + &dev_attr_capabilities, + &dev_attr_log_level, NULL }; @@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt, * @tgt: ibmvfc target struct * @job_step: initialization job step * + * Returns: 1 if step will be retried / 0 if not + * **/ -static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, +static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, void (*job_step) (struct ibmvfc_target *)) { if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); wake_up(&tgt->vhost->work_wait_q); + return 0; } else ibmvfc_init_tgt(tgt, job_step); + return 1; } /* Defined in FC-LS */ @@ -2831,7 +2891,7 @@ static void 
ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; struct ibmvfc_prli_svc_parms *parms = &rsp->parms; u32 status = rsp->common.status; - int index; + int index, level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); + tgt->add_rport = 1; } else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); } else if (prli_rsp[index].retry) @@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), - rsp->status, rsp->error, status); if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), + rsp->status, rsp->error, status); break; }; @@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; u32 status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); - if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); break; }; @@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "ADISC succeeded\n"); if (ibmvfc_adisc_needs_plogi(mad, tgt)) - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_FAILED: default: - tgt->need_login = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", @@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; u32 status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, 
IBMVFC_TGT_ACTION_NONE); @@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); - if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); + level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + + tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); break; }; @@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) } spin_unlock_irqrestore(vhost->host->host_lock, flags); - tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); + tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); if (!tgt) { dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", scsi_id); @@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; u32 mad_status = rsp->common.status; + int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: @@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); break; case IBMVFC_MAD_FAILED: - dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); - ibmvfc_retry_host_init(vhost); + level += ibmvfc_retry_host_init(vhost); + ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); break; case IBMVFC_MAD_DRIVER_FAILED: break; @@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) u32 mad_status = evt->xfer_iu->npiv_login.common.status; struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; unsigned int npiv_max_sectors; + int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_free_event(evt); break; case IBMVFC_MAD_FAILED: - dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - ibmvfc_retry_host_init(vhost); + level += ibmvfc_retry_host_init(vhost); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); + ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); ibmvfc_free_event(evt); return; case IBMVFC_MAD_CRQ_ERROR: @@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) return; } + vhost->logged_in = 1; npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS); dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors 
%u\n", rsp->partition_name, rsp->device_name, rsp->port_loc_code, @@ -3636,6 +3702,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) }; /** + * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) +{ + struct ibmvfc_host *vhost = evt->vhost; + u32 mad_status = evt->xfer_iu->npiv_logout.common.status; + + ibmvfc_free_event(evt); + + switch (mad_status) { + case IBMVFC_MAD_SUCCESS: + if (list_empty(&vhost->sent) && + vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { + ibmvfc_init_host(vhost, 0); + return; + } + break; + case IBMVFC_MAD_FAILED: + case IBMVFC_MAD_NOT_SUPPORTED: + case IBMVFC_MAD_CRQ_ERROR: + case IBMVFC_MAD_DRIVER_FAILED: + default: + ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status); + break; + } + + ibmvfc_hard_reset_host(vhost); +} + +/** + * ibmvfc_npiv_logout - Issue an NPIV Logout + * @vhost: ibmvfc host struct + * + **/ +static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) +{ + struct ibmvfc_npiv_logout_mad *mad; + struct ibmvfc_event *evt; + + evt = ibmvfc_get_event(vhost); + ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT); + + mad = &evt->iu.npiv_logout; + memset(mad, 0, sizeof(*mad)); + mad->common.version = 1; + mad->common.opcode = IBMVFC_NPIV_LOGOUT; + mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad); + + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); + + if (!ibmvfc_send_event(evt, vhost, default_timeout)) + ibmvfc_dbg(vhost, "Sent NPIV logout\n"); + else + ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); +} + +/** * ibmvfc_dev_init_to_do - Is there target initialization work to do? * @vhost: ibmvfc host struct * @@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: case IBMVFC_HOST_ACTION_INIT_WAIT: + case IBMVFC_HOST_ACTION_LOGO_WAIT: return 0; case IBMVFC_HOST_ACTION_TGT_INIT: case IBMVFC_HOST_ACTION_QUERY_TGTS: @@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) return 0; return 1; + case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_ALLOC_TGTS: - case IBMVFC_HOST_ACTION_TGT_ADD: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: case IBMVFC_HOST_ACTION_QUERY: @@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) { struct ibmvfc_host *vhost = tgt->vhost; - struct fc_rport *rport = tgt->rport; + struct fc_rport *rport; unsigned long flags; - if (rport) { - tgt_dbg(tgt, "Setting rport roles\n"); - fc_remote_port_rolechg(rport, tgt->ids.roles); - spin_lock_irqsave(vhost->host->host_lock, flags); - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); + tgt_dbg(tgt, "Adding rport\n"); + rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); + spin_lock_irqsave(vhost->host->host_lock, flags); + + if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { + tgt_dbg(tgt, "Deleting rport\n"); + list_del(&tgt->queue); spin_unlock_irqrestore(vhost->host->host_lock, flags); + fc_remote_port_delete(rport); + del_timer_sync(&tgt->timer); + kref_put(&tgt->kref, ibmvfc_release_tgt); return; } - tgt_dbg(tgt, "Adding rport\n"); - rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); - spin_lock_irqsave(vhost->host->host_lock, flags); - tgt->rport = rport; - ibmvfc_set_tgt_action(tgt, 
IBMVFC_TGT_ACTION_NONE); if (rport) { tgt_dbg(tgt, "rport add succeeded\n"); + tgt->rport = rport; rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; rport->supported_classes = 0; tgt->target_id = rport->scsi_target_id; @@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) vhost->events_to_log = 0; switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: + case IBMVFC_HOST_ACTION_LOGO_WAIT: case IBMVFC_HOST_ACTION_INIT_WAIT: break; + case IBMVFC_HOST_ACTION_LOGO: + vhost->job_step(vhost); + break; case IBMVFC_HOST_ACTION_INIT: BUG_ON(vhost->state != IBMVFC_INITIALIZING); if (vhost->delay_init) { @@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) if (vhost->state == IBMVFC_INITIALIZING) { if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { - ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); - vhost->init_retries = 0; - spin_unlock_irqrestore(vhost->host->host_lock, flags); - scsi_unblock_requests(vhost->host); + if (vhost->reinit) { + vhost->reinit = 0; + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + spin_unlock_irqrestore(vhost->host->host_lock, flags); + } else { + ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + wake_up(&vhost->init_wait_q); + schedule_work(&vhost->rport_add_work_q); + vhost->init_retries = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + scsi_unblock_requests(vhost->host); + } + return; } else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); @@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) if (!ibmvfc_dev_init_to_do(vhost)) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); break; - case IBMVFC_HOST_ACTION_TGT_ADD: - list_for_each_entry(tgt, &vhost->targets, queue) { - if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) { - spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_tgt_add_rport(tgt); - return; - } - } - - if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { - vhost->reinit = 0; - scsi_block_requests(vhost->host); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); - } else { - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); - wake_up(&vhost->init_wait_q); - } - break; default: break; }; @@ -4118,6 +4241,56 @@ nomem: } /** + * ibmvfc_rport_add_thread - Worker thread for rport adds + * @work: work struct + * + **/ +static void ibmvfc_rport_add_thread(struct work_struct *work) +{ + struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host, + rport_add_work_q); + struct ibmvfc_target *tgt; + struct fc_rport *rport; + unsigned long flags; + int did_work; + + ENTER; + spin_lock_irqsave(vhost->host->host_lock, flags); + do { + did_work = 0; + if (vhost->state != IBMVFC_ACTIVE) + break; + + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->add_rport) { + did_work = 1; + tgt->add_rport = 0; + kref_get(&tgt->kref); + rport = tgt->rport; + if (!rport) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_tgt_add_rport(tgt); + } else if (get_device(&rport->dev)) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + tgt_dbg(tgt, "Setting rport roles\n"); + fc_remote_port_rolechg(rport, tgt->ids.roles); + put_device(&rport->dev); + } + + kref_put(&tgt->kref, ibmvfc_release_tgt); + spin_lock_irqsave(vhost->host->host_lock, flags); + break; + } + } + } while(did_work); + + if (vhost->state == 
IBMVFC_ACTIVE) + vhost->scan_complete = 1; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + LEAVE; +} + +/** * ibmvfc_probe - Adapter hot plug add entry point * @vdev: vio device struct * @id: vio device id struct @@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) strcpy(vhost->partition_name, "UNKNOWN"); init_waitqueue_head(&vhost->work_wait_q); init_waitqueue_head(&vhost->init_wait_q); + INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); if ((rc = ibmvfc_alloc_mem(vhost))) goto free_scsi_host; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index ca1dcf7a7568..c2668d7d67f5 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.5" -#define IBMVFC_DRIVER_DATE "(March 19, 2009)" +#define IBMVFC_DRIVER_VERSION "1.0.6" +#define IBMVFC_DRIVER_DATE "(May 28, 2009)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -57,9 +57,10 @@ * Ensure we have resources for ERP and initialization: * 1 for ERP * 1 for initialization + * 1 for NPIV Logout * 2 for each discovery thread */ -#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) +#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2)) #define IBMVFC_MAD_SUCCESS 0x00 #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 @@ -127,6 +128,7 @@ enum ibmvfc_mad_types { IBMVFC_IMPLICIT_LOGOUT = 0x0040, IBMVFC_PASSTHRU = 0x0200, IBMVFC_TMF_MAD = 0x0100, + IBMVFC_NPIV_LOGOUT = 0x0800, }; struct ibmvfc_mad_common { @@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad { struct srp_direct_buf buffer; }__attribute__((packed, aligned (8))); +struct ibmvfc_npiv_logout_mad { + struct ibmvfc_mad_common common; +}__attribute__((packed, aligned (8))); + #define IBMVFC_MAX_NAME 256 struct ibmvfc_npiv_login { @@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp { #define IBMVFC_NATIVE_FC 0x01 #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; - u64 capabilites; + u64 capabilities; +#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -541,9 +548,17 @@ struct ibmvfc_crq_queue { dma_addr_t msg_token; }; +enum ibmvfc_ae_link_state { + IBMVFC_AE_LS_LINK_UP = 0x01, + IBMVFC_AE_LS_LINK_BOUNCED = 0x02, + IBMVFC_AE_LS_LINK_DOWN = 0x04, + IBMVFC_AE_LS_LINK_DEAD = 0x08, +}; + struct ibmvfc_async_crq { volatile u8 valid; - u8 pad[3]; + u8 link_state; + u8 pad[2]; u32 pad2; volatile u64 event; volatile u64 scsi_id; @@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue { union ibmvfc_iu { struct ibmvfc_mad_common mad_common; struct ibmvfc_npiv_login_mad npiv_login; + struct ibmvfc_npiv_logout_mad npiv_logout; struct ibmvfc_discover_targets discover_targets; struct ibmvfc_port_login plogi; struct ibmvfc_process_login prli; @@ -575,7 +591,6 @@ enum ibmvfc_target_action { IBMVFC_TGT_ACTION_NONE = 0, IBMVFC_TGT_ACTION_INIT, IBMVFC_TGT_ACTION_INIT_WAIT, - IBMVFC_TGT_ACTION_ADD_RPORT, IBMVFC_TGT_ACTION_DEL_RPORT, }; @@ -588,6 +603,7 @@ struct ibmvfc_target { int target_id; enum ibmvfc_target_action action; int need_login; + int add_rport; int init_retries; u32 cancel_key; struct ibmvfc_service_parms service_parms; @@ -627,6 +643,8 @@ struct ibmvfc_event_pool { enum ibmvfc_host_action { IBMVFC_HOST_ACTION_NONE = 0, + IBMVFC_HOST_ACTION_LOGO, + IBMVFC_HOST_ACTION_LOGO_WAIT, IBMVFC_HOST_ACTION_INIT, IBMVFC_HOST_ACTION_INIT_WAIT, IBMVFC_HOST_ACTION_QUERY, @@ -635,7 +653,6 @@ enum ibmvfc_host_action { 
IBMVFC_HOST_ACTION_ALLOC_TGTS, IBMVFC_HOST_ACTION_TGT_INIT, IBMVFC_HOST_ACTION_TGT_DEL_FAILED, - IBMVFC_HOST_ACTION_TGT_ADD, }; enum ibmvfc_host_state { @@ -682,6 +699,8 @@ struct ibmvfc_host { int client_migrated; int reinit; int delay_init; + int scan_complete; + int logged_in; int events_to_log; #define IBMVFC_AE_LINKUP 0x0001 #define IBMVFC_AE_LINKDOWN 0x0002 @@ -692,6 +711,7 @@ struct ibmvfc_host { void (*job_step) (struct ibmvfc_host *); struct task_struct *work_thread; struct tasklet_struct tasklet; + struct work_struct rport_add_work_q; wait_queue_head_t init_wait_q; wait_queue_head_t work_wait_q; }; @@ -707,6 +727,12 @@ struct ibmvfc_host { #define tgt_err(t, fmt, ...) \ dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) +#define tgt_log(t, level, fmt, ...) \ + do { \ + if ((t)->vhost->log_level >= level) \ + tgt_err(t, fmt, ##__VA_ARGS__); \ + } while (0) + #define ibmvfc_dbg(vhost, ...) \ DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index c9aa7611e408..11d2602ae88e 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -70,6 +70,7 @@ #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/delay.h> +#include <linux/of.h> #include <asm/firmware.h> #include <asm/vio.h> #include <asm/firmware.h> @@ -87,9 +88,15 @@ */ static int max_id = 64; static int max_channel = 3; -static int init_timeout = 5; +static int init_timeout = 300; +static int login_timeout = 60; +static int info_timeout = 30; +static int abort_timeout = 60; +static int reset_timeout = 60; static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; +static int fast_fail = 1; +static int client_reserve = 1; static struct scsi_transport_template *ibmvscsi_transport_template; @@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); module_param_named(max_requests, max_requests, int, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); +module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]"); +module_param_named(client_reserve, client_reserve, int, S_IRUGO ); +MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); /* ------------------------------------------------------------ * Routines for the event pool and event structs @@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, /* ------------------------------------------------------------ * Routines for driver initialization */ + /** - * adapter_info_rsp: - Handle response to MAD adapter info request - * @evt_struct: srp_event_struct with the response + * map_persist_bufs: - Pre-map persistent data for adapter logins + * @hostdata: ibmvscsi_host_data of host * - * Used as a "done" callback by when sending adapter_info. Gets called - * by ibmvscsi_handle_crq() -*/ -static void adapter_info_rsp(struct srp_event_struct *evt_struct) + * Map the capabilities and adapter info DMA buffers to avoid runtime failures. + * Return 1 on error, 0 on success. 
+ */ +static int map_persist_bufs(struct ibmvscsi_host_data *hostdata) { - struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - dma_unmap_single(hostdata->dev, - evt_struct->iu.mad.adapter_info.buffer, - evt_struct->iu.mad.adapter_info.common.length, - DMA_BIDIRECTIONAL); - if (evt_struct->xfer_iu->mad.adapter_info.common.status) { - dev_err(hostdata->dev, "error %d getting adapter info\n", - evt_struct->xfer_iu->mad.adapter_info.common.status); - } else { - dev_info(hostdata->dev, "host srp version: %s, " - "host partition %s (%d), OS %d, max io %u\n", - hostdata->madapter_info.srp_version, - hostdata->madapter_info.partition_name, - hostdata->madapter_info.partition_number, - hostdata->madapter_info.os_type, - hostdata->madapter_info.port_max_txu[0]); - - if (hostdata->madapter_info.port_max_txu[0]) - hostdata->host->max_sectors = - hostdata->madapter_info.port_max_txu[0] >> 9; - - if (hostdata->madapter_info.os_type == 3 && - strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { - dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", - hostdata->madapter_info.srp_version); - dev_err(hostdata->dev, "limiting scatterlists to %d\n", - MAX_INDIRECT_BUFS); - hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; - } + hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + + if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) { + dev_err(hostdata->dev, "Unable to map capabilities buffer!\n"); + return 1; } + + hostdata->adapter_info_addr = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) { + dev_err(hostdata->dev, "Unable to map adapter info buffer!\n"); + dma_unmap_single(hostdata->dev, hostdata->caps_addr, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + return 1; + } + + return 0; } /** - * send_mad_adapter_info: - Sends the mad adapter info request - * and stores the result so it can be retrieved with - * sysfs. We COULD consider causing a failure if the - * returned SRP version doesn't match ours. - * @hostdata: ibmvscsi_host_data of host - * - * Returns zero if successful. 
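map_persist_bufs() above trades per-request dma_map_single() calls for one probe-time mapping of the capabilities and adapter-info buffers, so the login path can no longer fail on a runtime mapping error (a real risk under CMO firmware); the counterpart unmap runs at remove time. A sketch of the pattern under those assumptions, with illustrative names (example_host, example_map):

#include <linux/dma-mapping.h>
#include <linux/device.h>

struct example_info {
	char data[128];			/* request payload; shape is illustrative */
};

struct example_host {
	struct device *dev;
	struct example_info info;	/* persistent, lives as long as the host */
	dma_addr_t info_addr;
};

static int example_map(struct example_host *h)
{
	h->info_addr = dma_map_single(h->dev, &h->info, sizeof(h->info),
				      DMA_BIDIRECTIONAL);
	/* always check the handle; on error nothing must be unmapped */
	if (dma_mapping_error(h->dev, h->info_addr))
		return 1;		/* matching the driver's 1-on-error convention */
	return 0;
}

static void example_unmap(struct example_host *h)
{
	dma_unmap_single(h->dev, h->info_addr, sizeof(h->info),
			 DMA_BIDIRECTIONAL);
}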
-*/ -static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) + * unmap_persist_bufs: - Unmap persistent data needed for adapter logins + * @hostdata: ibmvscsi_host_data of host + * + * Unmap the capabilities and adapter info DMA buffers + */ +static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata) { - struct viosrp_adapter_info *req; - struct srp_event_struct *evt_struct; - unsigned long flags; - dma_addr_t addr; - - evt_struct = get_event_struct(&hostdata->pool); - if (!evt_struct) { - dev_err(hostdata->dev, - "couldn't allocate an event for ADAPTER_INFO_REQ!\n"); - return; - } - - init_event_struct(evt_struct, - adapter_info_rsp, - VIOSRP_MAD_FORMAT, - init_timeout); - - req = &evt_struct->iu.mad.adapter_info; - memset(req, 0x00, sizeof(*req)); - - req->common.type = VIOSRP_ADAPTER_INFO_TYPE; - req->common.length = sizeof(hostdata->madapter_info); - req->buffer = addr = dma_map_single(hostdata->dev, - &hostdata->madapter_info, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); + dma_unmap_single(hostdata->dev, hostdata->caps_addr, + sizeof(hostdata->caps), DMA_BIDIRECTIONAL); - if (dma_mapping_error(hostdata->dev, req->buffer)) { - if (!firmware_has_feature(FW_FEATURE_CMO)) - dev_err(hostdata->dev, - "Unable to map request_buffer for " - "adapter_info!\n"); - free_event_struct(&hostdata->pool, evt_struct); - return; - } - - spin_lock_irqsave(hostdata->host->host_lock, flags); - if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) { - dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); - dma_unmap_single(hostdata->dev, - addr, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); - } - spin_unlock_irqrestore(hostdata->host->host_lock, flags); -}; + dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr, + sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); +} /** * login_rsp: - Handle response to SRP login request @@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) } dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); - - if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0) - dev_err(hostdata->dev, "Invalid request_limit.\n"); + hostdata->client_migrated = 0; /* Now we know what the real request-limit is. * This value is set rather than added to request_limit because @@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct) /* If we had any pending I/Os, kick them */ scsi_unblock_requests(hostdata->host); - - send_mad_adapter_info(hostdata); - return; } /** * send_srp_login: - Sends the srp login * @hostdata: ibmvscsi_host_data of host - * + * * Returns zero if successful. 
*/ static int send_srp_login(struct ibmvscsi_host_data *hostdata) @@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) unsigned long flags; struct srp_login_req *login; struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); - if (!evt_struct) { - dev_err(hostdata->dev, "couldn't allocate an event for login req!\n"); - return FAILED; - } - init_event_struct(evt_struct, - login_rsp, - VIOSRP_SRP_FORMAT, - init_timeout); + BUG_ON(!evt_struct); + init_event_struct(evt_struct, login_rsp, + VIOSRP_SRP_FORMAT, login_timeout); login = &evt_struct->iu.srp.login_req; - memset(login, 0x00, sizeof(struct srp_login_req)); + memset(login, 0, sizeof(*login)); login->opcode = SRP_LOGIN_REQ; login->req_it_iu_len = sizeof(union srp_iu); login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; - + spin_lock_irqsave(hostdata->host->host_lock, flags); /* Start out with a request limit of 0, since this is negotiated in * the login request we are just sending and login requests always @@ -962,13 +911,241 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) */ atomic_set(&hostdata->request_limit, 0); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); dev_info(hostdata->dev, "sent SRP login\n"); return rc; }; /** + * capabilities_rsp: - Handle response to MAD adapter capabilities request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending adapter_info. + */ +static void capabilities_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + + if (evt_struct->xfer_iu->mad.capabilities.common.status) { + dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", + evt_struct->xfer_iu->mad.capabilities.common.status); + } else { + if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) + dev_info(hostdata->dev, "Partition migration not supported\n"); + + if (client_reserve) { + if (hostdata->caps.reserve.common.server_support == + SERVER_SUPPORTS_CAP) + dev_info(hostdata->dev, "Client reserve enabled\n"); + else + dev_info(hostdata->dev, "Client reserve not supported\n"); + } + } + + send_srp_login(hostdata); +} + +/** + * send_mad_capabilities: - Sends the mad capabilities request + * and stores the result so it can be retrieved with + * @hostdata: ibmvscsi_host_data of host + */ +static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) +{ + struct viosrp_capabilities *req; + struct srp_event_struct *evt_struct; + unsigned long flags; + struct device_node *of_node = hostdata->dev->archdata.of_node; + const char *location; + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, capabilities_rsp, + VIOSRP_MAD_FORMAT, info_timeout); + + req = &evt_struct->iu.mad.capabilities; + memset(req, 0, sizeof(*req)); + + hostdata->caps.flags = CAP_LIST_SUPPORTED; + if (hostdata->client_migrated) + hostdata->caps.flags |= CLIENT_MIGRATED; + + strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), + sizeof(hostdata->caps.name)); + hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0'; + + location = of_get_property(of_node, "ibm,loc-code", NULL); + location = location ? 
location : dev_name(hostdata->dev); + strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); + hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; + + req->common.type = VIOSRP_CAPABILITIES_TYPE; + req->buffer = hostdata->caps_addr; + + hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; + hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); + hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; + hostdata->caps.migration.ecl = 1; + + if (client_reserve) { + hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; + hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); + hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; + hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; + req->common.length = sizeof(hostdata->caps); + } else + req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) + dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n"); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +}; + +/** + * fast_fail_rsp: - Handle response to MAD enable fast fail + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending enable fast fail. Gets called + * by ibmvscsi_handle_crq() + */ +static void fast_fail_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; + + if (status == VIOSRP_MAD_NOT_SUPPORTED) + dev_err(hostdata->dev, "fast_fail not supported in server\n"); + else if (status == VIOSRP_MAD_FAILED) + dev_err(hostdata->dev, "fast_fail request failed\n"); + else if (status != VIOSRP_MAD_SUCCESS) + dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status); + + send_mad_capabilities(hostdata); +} + +/** + * init_host - Start host initialization + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. + */ +static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) +{ + int rc; + unsigned long flags; + struct viosrp_fast_fail *fast_fail_mad; + struct srp_event_struct *evt_struct; + + if (!fast_fail) { + send_mad_capabilities(hostdata); + return 0; + } + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout); + + fast_fail_mad = &evt_struct->iu.mad.fast_fail; + memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); + fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; + fast_fail_mad->common.length = sizeof(*fast_fail_mad); + + spin_lock_irqsave(hostdata->host->host_lock, flags); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); + return rc; +} + +/** + * adapter_info_rsp: - Handle response to MAD adapter info request + * @evt_struct: srp_event_struct with the response + * + * Used as a "done" callback by when sending adapter_info. 
Gets called + * by ibmvscsi_handle_crq() +*/ +static void adapter_info_rsp(struct srp_event_struct *evt_struct) +{ + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + + if (evt_struct->xfer_iu->mad.adapter_info.common.status) { + dev_err(hostdata->dev, "error %d getting adapter info\n", + evt_struct->xfer_iu->mad.adapter_info.common.status); + } else { + dev_info(hostdata->dev, "host srp version: %s, " + "host partition %s (%d), OS %d, max io %u\n", + hostdata->madapter_info.srp_version, + hostdata->madapter_info.partition_name, + hostdata->madapter_info.partition_number, + hostdata->madapter_info.os_type, + hostdata->madapter_info.port_max_txu[0]); + + if (hostdata->madapter_info.port_max_txu[0]) + hostdata->host->max_sectors = + hostdata->madapter_info.port_max_txu[0] >> 9; + + if (hostdata->madapter_info.os_type == 3 && + strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { + dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", + hostdata->madapter_info.srp_version); + dev_err(hostdata->dev, "limiting scatterlists to %d\n", + MAX_INDIRECT_BUFS); + hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; + } + } + + enable_fast_fail(hostdata); +} + +/** + * send_mad_adapter_info: - Sends the mad adapter info request + * and stores the result so it can be retrieved with + * sysfs. We COULD consider causing a failure if the + * returned SRP version doesn't match ours. + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. +*/ +static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) +{ + struct viosrp_adapter_info *req; + struct srp_event_struct *evt_struct; + unsigned long flags; + + evt_struct = get_event_struct(&hostdata->pool); + BUG_ON(!evt_struct); + + init_event_struct(evt_struct, + adapter_info_rsp, + VIOSRP_MAD_FORMAT, + info_timeout); + + req = &evt_struct->iu.mad.adapter_info; + memset(req, 0x00, sizeof(*req)); + + req->common.type = VIOSRP_ADAPTER_INFO_TYPE; + req->common.length = sizeof(hostdata->madapter_info); + req->buffer = hostdata->adapter_info_addr; + + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) + dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +}; + +/** + * init_adapter: Start virtual adapter initialization sequence + * + */ +static void init_adapter(struct ibmvscsi_host_data *hostdata) +{ + send_mad_adapter_info(hostdata); +} + +/** * sync_completion: Signal that a synchronous command has completed * Note that after returning from this call, the evt_struct is freed. 
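With this change the ibmvscsi bring-up becomes a fixed chain of asynchronous MADs: init_adapter() sends adapter info, whose completion enables fast fail (if requested), whose completion runs the capabilities exchange, whose completion finally issues the SRP login. The model below compresses that hand-off into synchronous calls purely for illustration; every name in it is made up.

#include <stdio.h>

struct example_host { int fast_fail; };

static void send_login(struct example_host *h)
{
	printf("SRP login\n");			/* last step of the chain */
}

static void capabilities_done(struct example_host *h) { send_login(h); }

static void send_capabilities(struct example_host *h)
{
	printf("capabilities MAD\n");
	capabilities_done(h);			/* really an async completion */
}

static void fast_fail_done(struct example_host *h) { send_capabilities(h); }

static void send_fast_fail(struct example_host *h)
{
	printf("enable fast-fail MAD\n");
	fast_fail_done(h);			/* really an async completion */
}

static void adapter_info_done(struct example_host *h)
{
	if (h->fast_fail)
		send_fast_fail(h);
	else
		send_capabilities(h);		/* optional step skipped */
}

int main(void)
{
	struct example_host h = { .fast_fail = 1 };

	printf("adapter info MAD\n");		/* init_adapter() equivalent */
	adapter_info_done(&h);
	return 0;
}

Structuring initialization this way is what lets send_srp_login() move to the end of the chain: the request limit stays at zero until every prerequisite exchange has answered.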
* the caller waiting on this completion shouldn't touch the evt_struct @@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, - init_timeout); + abort_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; @@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) evt->sync_srp = &srp_rsp; init_completion(&evt->comp); - rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; @@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, - init_timeout); + reset_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; @@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) evt->sync_srp = &srp_rsp; init_completion(&evt->comp); - rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; @@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, if ((rc = ibmvscsi_ops->send_crq(hostdata, 0xC002000000000000LL, 0)) == 0) { /* Now login */ - send_srp_login(hostdata); + init_adapter(hostdata); } else { dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); } @@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, dev_info(hostdata->dev, "partner initialization complete\n"); /* Now login */ - send_srp_login(hostdata); + init_adapter(hostdata); break; default: dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); @@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, if (crq->format == 0x06) { /* We need to re-setup the interpartition connection */ dev_info(hostdata->dev, "Re-enabling adapter!\n"); + hostdata->client_migrated = 1; purge_requests(hostdata, DID_REQUEUE); if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata)) || @@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_event_struct(evt_struct, sync_completion, VIOSRP_MAD_FORMAT, - init_timeout); + info_timeout); host_config = &evt_struct->iu.mad.host_config; @@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_completion(&evt_struct->comp); spin_lock_irqsave(hostdata->host->host_lock, flags); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (rc == 0) wait_for_completion(&evt_struct->comp); @@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) spin_lock_irqsave(shost->host_lock, lock_flags); if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; - blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); } scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); spin_unlock_irqrestore(shost->host_lock, lock_flags); @@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) /* ------------------------------------------------------------ * sysfs attributes */ +static ssize_t show_host_vhost_loc(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = 
shost_priv(shost); + int len; + + len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n", + hostdata->caps.loc); + return len; +} + +static struct device_attribute ibmvscsi_host_vhost_loc = { + .attr = { + .name = "vhost_loc", + .mode = S_IRUGO, + }, + .show = show_host_vhost_loc, +}; + +static ssize_t show_host_vhost_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ibmvscsi_host_data *hostdata = shost_priv(shost); + int len; + + len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n", + hostdata->caps.name); + return len; +} + +static struct device_attribute ibmvscsi_host_vhost_name = { + .attr = { + .name = "vhost_name", + .mode = S_IRUGO, + }, + .show = show_host_vhost_name, +}; + static ssize_t show_host_srp_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = { }; static struct device_attribute *ibmvscsi_attrs[] = { + &ibmvscsi_host_vhost_loc, + &ibmvscsi_host_vhost_name, &ibmvscsi_host_srp_version, &ibmvscsi_host_partition_name, &ibmvscsi_host_partition_number, @@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) atomic_set(&hostdata->request_limit, -1); hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; + if (map_persist_bufs(hostdata)) { + dev_err(&vdev->dev, "couldn't map persistent buffers\n"); + goto persist_bufs_failed; + } + rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); if (rc != 0 && rc != H_RESOURCE) { dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); @@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) host->max_lun = 8; host->max_id = max_id; host->max_channel = max_channel; + host->max_cmd_len = 16; if (scsi_add_host(hostdata->host, hostdata->dev)) goto add_host_failed; @@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) init_pool_failed: ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); init_crq_failed: + unmap_persist_bufs(hostdata); + persist_bufs_failed: scsi_host_put(host); scsi_host_alloc_failed: return -1; @@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; + unmap_persist_bufs(hostdata); release_event_pool(&hostdata->pool, hostdata); ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h index 2d4339d5e16e..76425303def0 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.h +++ b/drivers/scsi/ibmvscsi/ibmvscsi.h @@ -90,6 +90,7 @@ struct event_pool { /* all driver data associated with a host adapter */ struct ibmvscsi_host_data { atomic_t request_limit; + int client_migrated; struct device *dev; struct event_pool pool; struct crq_queue queue; @@ -97,6 +98,9 @@ struct ibmvscsi_host_data { struct list_head sent; struct Scsi_Host *host; struct mad_adapter_info_data madapter_info; + struct capabilities caps; + dma_addr_t caps_addr; + dma_addr_t adapter_info_addr; }; /* routines for managing a command/response queue */ diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index 204604501ad8..2cd735d1d196 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h @@ -37,6 +37,7 @@ #define 
SRP_VERSION "16.a" #define SRP_MAX_IU_LEN 256 +#define SRP_MAX_LOC_LEN 32 union srp_iu { struct srp_login_req login_req; @@ -86,7 +87,37 @@ enum viosrp_mad_types { VIOSRP_EMPTY_IU_TYPE = 0x01, VIOSRP_ERROR_LOG_TYPE = 0x02, VIOSRP_ADAPTER_INFO_TYPE = 0x03, - VIOSRP_HOST_CONFIG_TYPE = 0x04 + VIOSRP_HOST_CONFIG_TYPE = 0x04, + VIOSRP_CAPABILITIES_TYPE = 0x05, + VIOSRP_ENABLE_FAST_FAIL = 0x08, +}; + +enum viosrp_mad_status { + VIOSRP_MAD_SUCCESS = 0x00, + VIOSRP_MAD_NOT_SUPPORTED = 0xF1, + VIOSRP_MAD_FAILED = 0xF7, +}; + +enum viosrp_capability_type { + MIGRATION_CAPABILITIES = 0x01, + RESERVATION_CAPABILITIES = 0x02, +}; + +enum viosrp_capability_support { + SERVER_DOES_NOT_SUPPORTS_CAP = 0x0, + SERVER_SUPPORTS_CAP = 0x01, + SERVER_CAP_DATA = 0x02, +}; + +enum viosrp_reserve_type { + CLIENT_RESERVE_SCSI_2 = 0x01, +}; + +enum viosrp_capability_flag { + CLIENT_MIGRATED = 0x01, + CLIENT_RECONNECT = 0x02, + CAP_LIST_SUPPORTED = 0x04, + CAP_LIST_DATA = 0x08, }; /* @@ -127,11 +158,46 @@ struct viosrp_host_config { u64 buffer; }; +struct viosrp_fast_fail { + struct mad_common common; +}; + +struct viosrp_capabilities { + struct mad_common common; + u64 buffer; +}; + +struct mad_capability_common { + u32 cap_type; + u16 length; + u16 server_support; +}; + +struct mad_reserve_cap { + struct mad_capability_common common; + u32 type; +}; + +struct mad_migration_cap { + struct mad_capability_common common; + u32 ecl; +}; + +struct capabilities{ + u32 flags; + char name[SRP_MAX_LOC_LEN]; + char loc[SRP_MAX_LOC_LEN]; + struct mad_migration_cap migration; + struct mad_reserve_cap reserve; +}; + union mad_iu { struct viosrp_empty_iu empty_iu; struct viosrp_error_log error_log; struct viosrp_adapter_info adapter_info; struct viosrp_host_config host_config; + struct viosrp_fast_fail fast_fail; + struct viosrp_capabilities capabilities; }; union viosrp_iu { diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index dd689ded8609..0f8bc772b112 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev) ioa_cfg->sdt_state = ABORT_DUMP; ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; ioa_cfg->in_ioa_bringdown = 1; + ioa_cfg->allow_cmds = 0; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); } @@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev) * Return value: * none **/ -static void ipr_remove(struct pci_dev *pdev) +static void __devexit ipr_remove(struct pci_dev *pdev) { struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); @@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = { .name = IPR_NAME, .id_table = ipr_pci_table, .probe = ipr_probe, - .remove = ipr_remove, + .remove = __devexit_p(ipr_remove), .shutdown = ipr_shutdown, .err_handler = &ipr_err_handler, }; diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 992af05aacf1..7af9bceb8aa9 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) atomic_inc(&mp->stats.xid_not_found); goto out; } + if (ep->esb_stat & ESB_ST_COMPLETE) { + atomic_inc(&mp->stats.xid_not_found); + goto out; + } if (ep->rxid == FC_XID_UNKNOWN) ep->rxid = ntohs(fh->fh_rx_id); if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 521f996f9b13..ad8b747837b0 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ 
b/drivers/scsi/libfc/fc_fcp.c @@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; break; case FC_CMD_ABORTED: - sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; + sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; break; case FC_CMD_TIME_OUT: sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 747d73c5c8af..7bfbff7e0efb 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) if (PTR_ERR(fp) == -FC_EX_CLOSED) return fc_rport_error(rport, fp); - if (rdata->retries < rdata->local_port->max_retry_count) { + if (rdata->retries < rdata->local_port->max_rport_retry_count) { FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", PTR_ERR(fp), fc_rport_state(rport)); rdata->retries++; @@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport) } EXPORT_SYMBOL(fc_rport_init); -int fc_setup_rport() +int fc_setup_rport(void) { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) @@ -1339,7 +1339,7 @@ int fc_setup_rport() } EXPORT_SYMBOL(fc_setup_rport); -void fc_destroy_rport() +void fc_destroy_rport(void) { destroy_workqueue(rport_event_queue); } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index e72b4ad47d35..59908aead531 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn) struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); - queue_work(ihost->workq, &conn->xmitwork); + if (ihost->workq) + queue_work(ihost->workq, &conn->xmitwork); } EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); @@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) * if the window closed with IO queued, then kick the * xmit thread */ - if (!list_empty(&session->leadconn->xmitqueue) || - !list_empty(&session->leadconn->mgmtqueue)) { - if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) - iscsi_conn_queue_work(session->leadconn); - } + if (!list_empty(&session->leadconn->cmdqueue) || + !list_empty(&session->leadconn->mgmtqueue)) + iscsi_conn_queue_work(session->leadconn); } } EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); @@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) itt_t itt; int rc; - rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); - if (rc) - return rc; + if (conn->session->tt->alloc_pdu) { + rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); + if (rc) + return rc; + } hdr = (struct iscsi_cmd *) task->hdr; itt = hdr->itt; memset(hdr, 0, sizeof(*hdr)); @@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) return -EIO; task->state = ISCSI_TASK_RUNNING; - list_move_tail(&task->running, &conn->run_list); conn->scsicmd_pdus_cnt++; ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " @@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) } /** - * iscsi_complete_command - finish a task + * iscsi_free_task - free a task * @task: iscsi cmd task * * Must be called with session lock. * This function returns the scsi command to scsi-ml or cleans * up mgmt tasks then returns the task to the pool. 
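 * The task struct itself is not freed here: it is returned to the session's command pool kfifo for reuse (the preallocated login task is never put back in the pool).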
*/ -static void iscsi_complete_command(struct iscsi_task *task) +static void iscsi_free_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; + ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", + task->itt, task->state, task->sc); + session->tt->cleanup_task(task); - list_del_init(&task->running); - task->state = ISCSI_TASK_COMPLETED; + task->state = ISCSI_TASK_FREE; task->sc = NULL; - - if (conn->task == task) - conn->task = NULL; /* * login task is preallocated so do not free */ @@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task) __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); - if (conn->ping_task == task) - conn->ping_task = NULL; - if (sc) { task->sc = NULL; /* SCSI eh reuses commands to verify us */ @@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task); static void __iscsi_put_task(struct iscsi_task *task) { if (atomic_dec_and_test(&task->refcount)) - iscsi_complete_command(task); + iscsi_free_task(task); } void iscsi_put_task(struct iscsi_task *task) @@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task) } EXPORT_SYMBOL_GPL(iscsi_put_task); +/** + * iscsi_complete_task - finish a task + * @task: iscsi cmd task + * @state: state to complete task with + * + * Must be called with session lock. + */ +static void iscsi_complete_task(struct iscsi_task *task, int state) +{ + struct iscsi_conn *conn = task->conn; + + ISCSI_DBG_SESSION(conn->session, + "complete task itt 0x%x state %d sc %p\n", + task->itt, task->state, task->sc); + if (task->state == ISCSI_TASK_COMPLETED || + task->state == ISCSI_TASK_ABRT_TMF || + task->state == ISCSI_TASK_ABRT_SESS_RECOV) + return; + WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); + task->state = state; + + if (!list_empty(&task->running)) + list_del_init(&task->running); + + if (conn->task == task) + conn->task = NULL; + + if (conn->ping_task == task) + conn->ping_task = NULL; + + /* release get from queueing */ + __iscsi_put_task(task); +} + /* - * session lock must be held + * session lock must be held and if not called for a task that is + * still pending or from the xmit thread, then xmit thread must + * be suspended. */ -static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, - int err) +static void fail_scsi_task(struct iscsi_task *task, int err) { + struct iscsi_conn *conn = task->conn; struct scsi_cmnd *sc; + int state; + /* + * if a command completes and we get a successful tmf response + * we will hit this because the scsi eh abort code does not take + * a ref to the task. 
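+ * In that case task->sc will already have been cleared, so the sc check below simply returns without touching the command.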
+ */ sc = task->sc; if (!sc) return; - if (task->state == ISCSI_TASK_PENDING) + if (task->state == ISCSI_TASK_PENDING) { /* * cmd never made it to the xmit thread, so we should not count * the cmd in the sequencing */ conn->session->queued_cmdsn--; + /* it was never sent so just complete like normal */ + state = ISCSI_TASK_COMPLETED; + } else if (err == DID_TRANSPORT_DISRUPTED) + state = ISCSI_TASK_ABRT_SESS_RECOV; + else + state = ISCSI_TASK_ABRT_TMF; - sc->result = err; + sc->result = err << 16; if (!scsi_bidi_cmnd(sc)) scsi_set_resid(sc, scsi_bufflen(sc)); else { @@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, scsi_in(sc)->resid = scsi_in(sc)->length; } - if (conn->task == task) - conn->task = NULL; - /* release ref from queuecommand */ - __iscsi_put_task(task); + iscsi_complete_task(task, state); } static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, @@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, session->state = ISCSI_STATE_LOGGING_OUT; task->state = ISCSI_TASK_RUNNING; - list_move_tail(&task->running, &conn->mgmt_run_list); ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, task->data_count); @@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct iscsi_session *session = conn->session; + struct iscsi_host *ihost = shost_priv(session->host); struct iscsi_task *task; itt_t itt; @@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, */ task = conn->login_task; else { + if (session->state != ISCSI_STATE_LOGGED_IN) + return NULL; + BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); @@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, atomic_set(&task->refcount, 1); task->conn = conn; task->sc = NULL; + INIT_LIST_HEAD(&task->running); + task->state = ISCSI_TASK_PENDING; if (data_size) { memcpy(task->data, data, data_size); @@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } else task->data_count = 0; - if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { - iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " - "pdu for mgmt task.\n"); - goto requeue_task; + if (conn->session->tt->alloc_pdu) { + if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { + iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " + "pdu for mgmt task.\n"); + goto free_task; + } } + itt = task->hdr->itt; task->hdr_len = sizeof(struct iscsi_hdr); memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); @@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, task->conn->session->age); } - INIT_LIST_HEAD(&task->running); - list_add_tail(&task->running, &conn->mgmtqueue); - - if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { + if (!ihost->workq) { if (iscsi_prep_mgmt_task(conn, task)) goto free_task; if (session->tt->xmit_task(task)) goto free_task; - - } else + } else { + list_add_tail(&task->running, &conn->mgmtqueue); iscsi_conn_queue_work(conn); + } return task; free_task: __iscsi_put_task(task); return NULL; - -requeue_task: - if (task != conn->login_task) - __kfifo_put(session->cmdpool.queue, (void*)&task, - sizeof(void*)); - return NULL; } int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, @@ -701,11 +742,10 @@ invalid_datalen: sc->result = (DID_BAD_TARGET << 16) | 
rhdr->cmd_status; } out: - ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n", + ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n", sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - - __iscsi_put_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); } /** @@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) return; + iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr); sc->result = (DID_OK << 16) | rhdr->cmd_status; conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | @@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } + ISCSI_DBG_SESSION(conn->session, "data in with status done " + "[sc %p res %d itt 0x%x]\n", + sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - __iscsi_put_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); } static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) @@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * * The session lock must be held. */ -static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) +struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) { struct iscsi_session *session = conn->session; int i; @@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) return session->cmds[i]; } +EXPORT_SYMBOL_GPL(iscsi_itt_to_task); /** * __iscsi_complete_pdu - complete pdu @@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } iscsi_tmf_rsp(conn, hdr); - __iscsi_put_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); break; case ISCSI_OP_NOOP_IN: iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); @@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, goto recv_pdu; mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); - __iscsi_put_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); break; default: rc = ISCSI_ERR_BAD_OPCODE; @@ -989,7 +1034,7 @@ out: recv_pdu: if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) rc = ISCSI_ERR_CONN_FAILED; - __iscsi_put_task(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); return rc; } EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); @@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; - list_move_tail(&task->running, &conn->requeue); + /* + * this may be on the requeue list already if the xmit_task callout + * is handling the r2ts while we are adding new ones + */ + if (list_empty(&task->running)) + list_add_tail(&task->running, &conn->requeue); iscsi_conn_queue_work(conn); } EXPORT_SYMBOL_GPL(iscsi_requeue_task); @@ -1206,6 +1256,7 @@ check_mgmt: while (!list_empty(&conn->mgmtqueue)) { conn->task = list_entry(conn->mgmtqueue.next, struct iscsi_task, running); + list_del_init(&conn->task->running); if (iscsi_prep_mgmt_task(conn, conn->task)) { __iscsi_put_task(conn->task); conn->task = NULL; @@ -1217,23 +1268,26 @@ check_mgmt: } /* process pending command queue */ - while (!list_empty(&conn->xmitqueue)) { + while (!list_empty(&conn->cmdqueue)) { if (conn->tmf_state == TMF_QUEUED) break; - conn->task = list_entry(conn->xmitqueue.next, + conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task, running); + 
list_del_init(&conn->task->running); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { - fail_command(conn, conn->task, DID_IMM_RETRY << 16); + fail_scsi_task(conn->task, DID_IMM_RETRY); continue; } rc = iscsi_prep_scsi_cmd_pdu(conn->task); if (rc) { if (rc == -ENOMEM) { + list_add_tail(&conn->task->running, + &conn->cmdqueue); conn->task = NULL; goto again; } else - fail_command(conn, conn->task, DID_ABORT << 16); + fail_scsi_task(conn->task, DID_ABORT); continue; } rc = iscsi_xmit_task(conn); @@ -1260,8 +1314,8 @@ check_mgmt: conn->task = list_entry(conn->requeue.next, struct iscsi_task, running); + list_del_init(&conn->task->running); conn->task->state = ISCSI_TASK_RUNNING; - list_move_tail(conn->requeue.next, &conn->run_list); rc = iscsi_xmit_task(conn); if (rc) goto again; @@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct iscsi_cls_session *cls_session; struct Scsi_Host *host; + struct iscsi_host *ihost; int reason = 0; struct iscsi_session *session; struct iscsi_conn *conn; @@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) sc->SCp.ptr = NULL; host = sc->device->host; + ihost = shost_priv(host); spin_unlock(host->host_lock); cls_session = starget_to_session(scsi_target(sc->device)); @@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) goto fault; } - /* - * ISCSI_STATE_FAILED is a temp. state. The recovery - * code will decide what is best to do with command queued - * during this time - */ - if (session->state != ISCSI_STATE_LOGGED_IN && - session->state != ISCSI_STATE_FAILED) { + if (session->state != ISCSI_STATE_LOGGED_IN) { /* * to handle the race between when we set the recovery state * and block the session we requeue here (commands could @@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) * up because the block code is not locked) */ switch (session->state) { + case ISCSI_STATE_FAILED: case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; - goto reject; + sc->result = DID_IMM_RETRY << 16; + break; case ISCSI_STATE_LOGGING_OUT: reason = FAILURE_SESSION_LOGGING_OUT; - goto reject; + sc->result = DID_IMM_RETRY << 16; + break; case ISCSI_STATE_RECOVERY_FAILED: reason = FAILURE_SESSION_RECOVERY_TIMEOUT; sc->result = DID_TRANSPORT_FAILFAST << 16; @@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) reason = FAILURE_OOM; goto reject; } - list_add_tail(&task->running, &conn->xmitqueue); - if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { + if (!ihost->workq) { reason = iscsi_prep_scsi_cmd_pdu(task); if (reason) { if (reason == -ENOMEM) { @@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) reason = FAILURE_SESSION_NOT_READY; goto prepd_reject; } - } else + } else { + list_add_tail(&task->running, &conn->cmdqueue); iscsi_conn_queue_work(conn); + } session->queued_cmdsn++; spin_unlock(&session->lock); @@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) prepd_reject: sc->scsi_done = NULL; - iscsi_complete_command(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); reject: spin_unlock(&session->lock); ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", @@ -1439,7 +1493,7 @@ reject: prepd_fault: sc->scsi_done = NULL; - iscsi_complete_command(task); + iscsi_complete_task(task, ISCSI_TASK_COMPLETED); fault: 
spin_unlock(&session->lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", @@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, * Fail commands. session lock held and recv side suspended and xmit * thread flushed */ -static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, - int error) +static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun, + int error) { - struct iscsi_task *task, *tmp; - - if (conn->task) { - if (lun == -1 || - (conn->task->sc && conn->task->sc->device->lun == lun)) - conn->task = NULL; - } + struct iscsi_task *task; + int i; - /* flush pending */ - list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { - if (lun == task->sc->device->lun || lun == -1) { - ISCSI_DBG_SESSION(conn->session, - "failing pending sc %p itt 0x%x\n", - task->sc, task->itt); - fail_command(conn, task, error << 16); - } - } + for (i = 0; i < conn->session->cmds_max; i++) { + task = conn->session->cmds[i]; + if (!task->sc || task->state == ISCSI_TASK_FREE) + continue; - list_for_each_entry_safe(task, tmp, &conn->requeue, running) { - if (lun == task->sc->device->lun || lun == -1) { - ISCSI_DBG_SESSION(conn->session, - "failing requeued sc %p itt 0x%x\n", - task->sc, task->itt); - fail_command(conn, task, error << 16); - } - } + if (lun != -1 && lun != task->sc->device->lun) + continue; - /* fail all other running */ - list_for_each_entry_safe(task, tmp, &conn->run_list, running) { - if (lun == task->sc->device->lun || lun == -1) { - ISCSI_DBG_SESSION(conn->session, - "failing in progress sc %p itt 0x%x\n", - task->sc, task->itt); - fail_command(conn, task, error << 16); - } + ISCSI_DBG_SESSION(conn->session, + "failing sc %p itt 0x%x state %d\n", + task->sc, task->itt, task->state); + fail_scsi_task(task, error); } } @@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn) struct iscsi_host *ihost = shost_priv(shost); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) + if (ihost->workq) flush_workqueue(ihost->workq); } EXPORT_SYMBOL_GPL(iscsi_suspend_tx); @@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx); static void iscsi_start_tx(struct iscsi_conn *conn) { clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) - iscsi_conn_queue_work(conn); + iscsi_conn_queue_work(conn); +} + +/* + * We want to make sure a ping is in flight. It has timed out. + * And we are not busy processing a pdu that is making + * progress but got started before the ping and is taking a while + * to complete so the ping is just stuck behind it in a queue. 
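+ * In short: the ping has timed out only when a ping_task exists and jiffies is past last_recv + (recv_timeout + ping_timeout) * HZ.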
+ */ +static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) +{ + if (conn->ping_task && + time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + + (conn->ping_timeout * HZ), jiffies)) + return 1; + else + return 0; } static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) @@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) * if the ping timedout then we are in the middle of cleaning up * and can let the iscsi eh handle it */ - if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + - (conn->ping_timeout * HZ), jiffies)) + if (iscsi_has_ping_timed_out(conn)) { rc = BLK_EH_RESET_TIMER; + goto done; + } /* * if we are about to check the transport then give the command * more time */ if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), - jiffies)) + jiffies)) { rc = BLK_EH_RESET_TIMER; + goto done; + } + /* if in the middle of checking the transport then give us more time */ if (conn->ping_task) rc = BLK_EH_RESET_TIMER; @@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data) recv_timeout *= HZ; last_recv = conn->last_recv; - if (conn->ping_task && - time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), - jiffies)) { + + if (iscsi_has_ping_timed_out(conn)) { iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " - "expired, last rx %lu, last ping %lu, " - "now %lu\n", conn->ping_timeout, last_recv, - conn->last_ping, jiffies); + "expired, recv timeout %d, last rx %lu, " + "last ping %lu, now %lu\n", + conn->ping_timeout, conn->recv_timeout, + last_recv, conn->last_ping, jiffies); spin_unlock(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return; @@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; + ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc); + mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); /* @@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) sc->SCp.phase != session->age) { spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); + ISCSI_DBG_SESSION(session, "failing abort due to dropped " + "session.\n"); return FAILED; } @@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) } if (task->state == ISCSI_TASK_PENDING) { - fail_command(conn, task, DID_ABORT << 16); + fail_scsi_task(task, DID_ABORT); goto success; } @@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) * then sent more data for the cmd. 
*/ spin_lock(&session->lock); - fail_command(conn, task, DID_ABORT << 16); + fail_scsi_task(task, DID_ABORT); conn->tmf_state = TMF_INITIAL; spin_unlock(&session->lock); iscsi_start_tx(conn); @@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) iscsi_suspend_tx(conn); spin_lock_bh(&session->lock); - fail_all_commands(conn, sc->device->lun, DID_ERROR); + fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); conn->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->lock); @@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, if (cmd_task_size) task->dd_data = &task[1]; task->itt = cmd_i; + task->state = ISCSI_TASK_FREE; INIT_LIST_HEAD(&task->running); } @@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, conn->transport_timer.data = (unsigned long)conn; conn->transport_timer.function = iscsi_check_transport_timeouts; - INIT_LIST_HEAD(&conn->run_list); - INIT_LIST_HEAD(&conn->mgmt_run_list); INIT_LIST_HEAD(&conn->mgmtqueue); - INIT_LIST_HEAD(&conn->xmitqueue); + INIT_LIST_HEAD(&conn->cmdqueue); INIT_LIST_HEAD(&conn->requeue); INIT_WORK(&conn->xmitwork, iscsi_xmitworker); @@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) EXPORT_SYMBOL_GPL(iscsi_conn_start); static void -flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) +fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) { - struct iscsi_task *task, *tmp; + struct iscsi_task *task; + int i, state; - /* handle pending */ - list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { - ISCSI_DBG_SESSION(session, "flushing pending mgmt task " - "itt 0x%x\n", task->itt); - /* release ref from prep task */ - __iscsi_put_task(task); - } + for (i = 0; i < conn->session->cmds_max; i++) { + task = conn->session->cmds[i]; + if (task->sc) + continue; - /* handle running */ - list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { - ISCSI_DBG_SESSION(session, "flushing running mgmt task " - "itt 0x%x\n", task->itt); - /* release ref from prep task */ - __iscsi_put_task(task); - } + if (task->state == ISCSI_TASK_FREE) + continue; + + ISCSI_DBG_SESSION(conn->session, + "failing mgmt itt 0x%x state %d\n", + task->itt, task->state); + state = ISCSI_TASK_ABRT_SESS_RECOV; + if (task->state == ISCSI_TASK_PENDING) + state = ISCSI_TASK_COMPLETED; + iscsi_complete_task(task, state); - conn->task = NULL; + } } static void iscsi_start_session_recovery(struct iscsi_session *session, @@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, { int old_stop_stage; - del_timer_sync(&conn->transport_timer); - mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); if (conn->stop_stage == STOP_CONN_TERM) { @@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, session->state = ISCSI_STATE_TERMINATE; else if (conn->stop_stage != STOP_CONN_RECOVER) session->state = ISCSI_STATE_IN_RECOVERY; + spin_unlock_bh(&session->lock); + + del_timer_sync(&conn->transport_timer); + iscsi_suspend_tx(conn); + spin_lock_bh(&session->lock); old_stop_stage = conn->stop_stage; conn->stop_stage = flag; conn->c_stage = ISCSI_CONN_STOPPED; spin_unlock_bh(&session->lock); - iscsi_suspend_tx(conn); /* * for connection level recovery we should not calculate * header digest. conn->hdr_size used for optimization @@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, * flush queues. 
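 * Both outstanding SCSI tasks and mgmt tasks are failed here now that the xmit path has been suspended.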
*/ spin_lock_bh(&session->lock); - if (flag == STOP_CONN_RECOVER) - fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); - else - fail_all_commands(conn, -1, DID_ERROR); - flush_control_queues(session, conn); + fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); + fail_mgmt_tasks(session, conn); spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); } @@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, } EXPORT_SYMBOL_GPL(iscsi_conn_bind); +static int iscsi_switch_str_param(char **param, char *new_val_buf) +{ + char *new_val; + + if (*param) { + if (!strcmp(*param, new_val_buf)) + return 0; + } + + new_val = kstrdup(new_val_buf, GFP_NOIO); + if (!new_val) + return -ENOMEM; + + kfree(*param); + *param = new_val; + return 0; +} int iscsi_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) @@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, sscanf(buf, "%u", &conn->exp_statsn); break; case ISCSI_PARAM_USERNAME: - kfree(session->username); - session->username = kstrdup(buf, GFP_KERNEL); - if (!session->username) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->username, buf); case ISCSI_PARAM_USERNAME_IN: - kfree(session->username_in); - session->username_in = kstrdup(buf, GFP_KERNEL); - if (!session->username_in) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->username_in, buf); case ISCSI_PARAM_PASSWORD: - kfree(session->password); - session->password = kstrdup(buf, GFP_KERNEL); - if (!session->password) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->password, buf); case ISCSI_PARAM_PASSWORD_IN: - kfree(session->password_in); - session->password_in = kstrdup(buf, GFP_KERNEL); - if (!session->password_in) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->password_in, buf); case ISCSI_PARAM_TARGET_NAME: - /* this should not change between logins */ - if (session->targetname) - break; - - session->targetname = kstrdup(buf, GFP_KERNEL); - if (!session->targetname) - return -ENOMEM; - break; + return iscsi_switch_str_param(&session->targetname, buf); case ISCSI_PARAM_TPGT: sscanf(buf, "%d", &session->tpgt); break; @@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, sscanf(buf, "%d", &conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: - /* - * this is the address returned in discovery so it should - * not change between logins. 
- */ - if (conn->persistent_address) - break; - - conn->persistent_address = kstrdup(buf, GFP_KERNEL); - if (!conn->persistent_address) - return -ENOMEM; - break; + return iscsi_switch_str_param(&conn->persistent_address, buf); case ISCSI_PARAM_IFACE_NAME: - if (!session->ifacename) - session->ifacename = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&session->ifacename, buf); case ISCSI_PARAM_INITIATOR_NAME: - if (!session->initiatorname) - session->initiatorname = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&session->initiatorname, buf); default: return -ENOSYS; } @@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, len = sprintf(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: - if (!session->initiatorname) - len = sprintf(buf, "%s\n", "unknown"); - else - len = sprintf(buf, "%s\n", session->initiatorname); + len = sprintf(buf, "%s\n", session->initiatorname); break; default: return -ENOSYS; @@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - if (!ihost->netdev) - len = sprintf(buf, "%s\n", "default"); - else - len = sprintf(buf, "%s\n", ihost->netdev); + len = sprintf(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: - if (!ihost->hwaddress) - len = sprintf(buf, "%s\n", "default"); - else - len = sprintf(buf, "%s\n", ihost->hwaddress); + len = sprintf(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - if (!ihost->initiatorname) - len = sprintf(buf, "%s\n", "unknown"); - else - len = sprintf(buf, "%s\n", ihost->initiatorname); + len = sprintf(buf, "%s\n", ihost->initiatorname); break; case ISCSI_HOST_PARAM_IPADDRESS: - if (!strlen(ihost->local_address)) - len = sprintf(buf, "%s\n", "unknown"); - else - len = sprintf(buf, "%s\n", - ihost->local_address); + len = sprintf(buf, "%s\n", ihost->local_address); break; default: return -ENOSYS; @@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - if (!ihost->netdev) - ihost->netdev = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&ihost->netdev, buf); case ISCSI_HOST_PARAM_HWADDRESS: - if (!ihost->hwaddress) - ihost->hwaddress = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&ihost->hwaddress, buf); case ISCSI_HOST_PARAM_INITIATOR_NAME: - if (!ihost->initiatorname) - ihost->initiatorname = kstrdup(buf, GFP_KERNEL); - break; + return iscsi_switch_str_param(&ihost->initiatorname, buf); default: return -ENOSYS; } diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index b579ca9f4836..2bc07090321d 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task) struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_r2t_info *r2t; - /* nothing to do for mgmt or pending tasks */ - if (!task->sc || task->state == ISCSI_TASK_PENDING) + /* nothing to do for mgmt */ + if (!task->sc) return; /* flush task's r2t queues */ @@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) int datasn = be32_to_cpu(rhdr->datasn); unsigned total_in_length = scsi_in(task->sc)->length; - iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); + /* + * lib iscsi will update this in the completion handling if there + * is 
status. + */ + if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) + iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); + if (tcp_conn->in.datalen == 0) return 0; @@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, int rc = 0; ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); + /* + * Update for each skb instead of pdu, because over slow networks a + * data_in's data could take a while to read in. We also want to + * account for r2ts. + */ + conn->last_recv = jiffies; if (unlikely(conn->suspend_rx)) { ISCSI_DBG_TCP(conn, "Rx suspended!\n"); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 1105f9a111ba..540569849099 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -23,6 +23,13 @@ struct lpfc_sli2_slim; +#define LPFC_PCI_DEV_LP 0x1 +#define LPFC_PCI_DEV_OC 0x2 + +#define LPFC_SLI_REV2 2 +#define LPFC_SLI_REV3 3 +#define LPFC_SLI_REV4 4 + #define LPFC_MAX_TARGET 4096 /* max number of targets supported */ #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els requests */ @@ -98,9 +105,11 @@ struct lpfc_dma_pool { }; struct hbq_dmabuf { + struct lpfc_dmabuf hbuf; struct lpfc_dmabuf dbuf; uint32_t size; uint32_t tag; + struct lpfc_rcqe rcqe; }; /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ @@ -134,7 +143,10 @@ typedef struct lpfc_vpd { } rev; struct { #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd2 :24; /* Reserved */ + uint32_t rsvd3 :19; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t csah : 1; /* Configure Synchronous Abort Handling */ @@ -152,7 +164,10 @@ typedef struct lpfc_vpd { uint32_t csah : 1; /* Configure Synchronous Abort Handling */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ - uint32_t rsvd2 :24; /* Reserved */ + uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd3 :19; /* Reserved */ #endif } sli3Feat; } lpfc_vpd_t; @@ -264,8 +279,8 @@ enum hba_state { }; struct lpfc_vport { - struct list_head listentry; struct lpfc_hba *phba; + struct list_head listentry; uint8_t port_type; #define LPFC_PHYSICAL_PORT 1 #define LPFC_NPIV_PORT 2 @@ -273,6 +288,9 @@ struct lpfc_vport { enum discovery_state port_state; uint16_t vpi; + uint16_t vfi; + uint8_t vfi_state; +#define LPFC_VFI_REGISTERED 0x1 uint32_t fc_flag; /* FC flags */ /* Several of these flags are HBA centric and should be moved to @@ -385,6 +403,9 @@ struct lpfc_vport { #endif uint8_t stat_data_enabled; uint8_t stat_data_blocked; + struct list_head rcv_buffer_list; + uint32_t vport_flag; +#define STATIC_VPORT 1 }; struct hbq_s { @@ -420,8 +441,66 @@ enum intr_type_t { }; struct lpfc_hba { + /* SCSI interface function jump table entries */ + int (*lpfc_new_scsi_buf) + (struct lpfc_vport *, int); + struct lpfc_scsi_buf * (*lpfc_get_scsi_buf) + 
(struct lpfc_hba *); + int (*lpfc_scsi_prep_dma_buf) + (struct lpfc_hba *, struct lpfc_scsi_buf *); + void (*lpfc_scsi_unprep_dma_buf) + (struct lpfc_hba *, struct lpfc_scsi_buf *); + void (*lpfc_release_scsi_buf) + (struct lpfc_hba *, struct lpfc_scsi_buf *); + void (*lpfc_rampdown_queue_depth) + (struct lpfc_hba *); + void (*lpfc_scsi_prep_cmnd) + (struct lpfc_vport *, struct lpfc_scsi_buf *, + struct lpfc_nodelist *); + int (*lpfc_scsi_prep_task_mgmt_cmd) + (struct lpfc_vport *, struct lpfc_scsi_buf *, + unsigned int, uint8_t); + + /* IOCB interface function jump table entries */ + int (*__lpfc_sli_issue_iocb) + (struct lpfc_hba *, uint32_t, + struct lpfc_iocbq *, uint32_t); + void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, + struct lpfc_iocbq *); + int (*lpfc_hba_down_post)(struct lpfc_hba *phba); + + + IOCB_t * (*lpfc_get_iocb_from_iocbq) + (struct lpfc_iocbq *); + void (*lpfc_scsi_cmd_iocb_cmpl) + (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); + + /* MBOX interface function jump table entries */ + int (*lpfc_sli_issue_mbox) + (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); + /* Slow-path IOCB process function jump table entries */ + void (*lpfc_sli_handle_slow_ring_event) + (struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + uint32_t mask); + /* INIT device interface function jump table entries */ + int (*lpfc_sli_hbq_to_firmware) + (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *); + int (*lpfc_sli_brdrestart) + (struct lpfc_hba *); + int (*lpfc_sli_brdready) + (struct lpfc_hba *, uint32_t); + void (*lpfc_handle_eratt) + (struct lpfc_hba *); + void (*lpfc_stop_port) + (struct lpfc_hba *); + + + /* SLI4 specific HBA data structure */ + struct lpfc_sli4_hba sli4_hba; + struct lpfc_sli sli; - uint32_t sli_rev; /* SLI2 or SLI3 */ + uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... 
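(see the LPFC_PCI_DEV_LP and LPFC_PCI_DEV_OC definitions at the top of this file)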
*/ + uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */ uint32_t sli3_options; /* Mask of enabled SLI3 options */ #define LPFC_SLI3_HBQ_ENABLED 0x01 #define LPFC_SLI3_NPIV_ENABLED 0x02 @@ -429,6 +508,7 @@ struct lpfc_hba { #define LPFC_SLI3_CRP_ENABLED 0x08 #define LPFC_SLI3_INB_ENABLED 0x10 #define LPFC_SLI3_BG_ENABLED 0x20 +#define LPFC_SLI3_DSS_ENABLED 0x40 uint32_t iocb_cmd_size; uint32_t iocb_rsp_size; @@ -442,8 +522,13 @@ struct lpfc_hba { uint32_t hba_flag; /* hba generic flags */ #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ - -#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ +#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ +#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ +#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ +#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ +#define FCP_XRI_ABORT_EVENT 0x20 +#define ELS_XRI_ABORT_EVENT 0x40 +#define ASYNC_EVENT 0x80 struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; @@ -502,6 +587,9 @@ struct lpfc_hba { uint32_t cfg_poll; uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; + uint32_t cfg_fcp_imax; + uint32_t cfg_fcp_wq_count; + uint32_t cfg_fcp_eq_count; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -511,6 +599,8 @@ struct lpfc_hba { uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; uint32_t cfg_enable_bg; + uint32_t cfg_enable_fip; + uint32_t cfg_log_verbose; lpfc_vpd_t vpd; /* vital product data */ @@ -526,11 +616,12 @@ struct lpfc_hba { unsigned long data_flags; uint32_t hbq_in_use; /* HBQs in use flag */ - struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ + struct list_head rb_pend_list; /* Received buffers to be processed */ uint32_t hbq_count; /* Count of configured HBQs */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ + unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ void __iomem *slim_memmap_p; /* Kernel memory mapped address for PCI BAR0 */ @@ -593,7 +684,8 @@ struct lpfc_hba { /* pci_mem_pools */ struct pci_pool *lpfc_scsi_dma_buf_pool; struct pci_pool *lpfc_mbuf_pool; - struct pci_pool *lpfc_hbq_pool; + struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ + struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; @@ -609,6 +701,14 @@ struct lpfc_hba { struct lpfc_vport *pport; /* physical lpfc_vport pointer */ uint16_t max_vpi; /* Maximum virtual nports */ #define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ + uint16_t max_vports; /* + * For IOV HBAs max_vpi can change + * after a reset. max_vports is max + * number of vports present. This can + * be greater than max_vpi. 
+ */ + uint16_t vpi_base; + uint16_t vfi_base; unsigned long *vpi_bmask; /* vpi allocation table */ /* Data structure used by fabric iocb scheduler */ @@ -667,6 +767,11 @@ struct lpfc_hba { /* Maximum number of events that can be outstanding at any time*/ #define LPFC_MAX_EVT_COUNT 512 atomic_t fast_event_count; + struct lpfc_fcf fcf; + uint8_t fc_map[3]; + uint8_t valid_vlan; + uint16_t vlan_id; + struct list_head fcf_conn_rec_list; }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c14f0cbdb125..d73e677201f8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -30,8 +30,10 @@ #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost) return -ENOMEM; memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmboxq->mb.mbxCommand = MBX_DOWN_LINK; - pmboxq->mb.mbxOwner = OWN_HOST; + pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; + pmboxq->u.mb.mbxOwner = OWN_HOST; mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); - if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { + if ((mbxstatus == MBX_SUCCESS) && + (pmboxq->u.mb.mbxStatus == 0 || + pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) { memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); @@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba, uint32_t *mrpi, uint32_t *arpi, uint32_t *mvpi, uint32_t *avpi) { - struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_mbx_read_config *rd_config; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; @@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba, */ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || - (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) + (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) return 0; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) @@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba, return 0; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmb = &pmboxq->mb; + pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_CONFIG; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = MBX_NOT_FINISHED; else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba, return 0; } - if (mrpi) - *mrpi = pmb->un.varRdConfig.max_rpi; - if (arpi) - *arpi = pmb->un.varRdConfig.avail_rpi; - if (mxri) - *mxri = pmb->un.varRdConfig.max_xri; - if (axri) - *axri = pmb->un.varRdConfig.avail_xri; - if (mvpi) - *mvpi = pmb->un.varRdConfig.max_vpi; - if (avpi) - *avpi = pmb->un.varRdConfig.avail_vpi; + if (phba->sli_rev == LPFC_SLI_REV4) { + rd_config = &pmboxq->u.mqe.un.rd_config; + if (mrpi) + *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, 
rd_config); + if (arpi) + *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - + phba->sli4_hba.max_cfg_param.rpi_used; + if (mxri) + *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); + if (axri) + *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - + phba->sli4_hba.max_cfg_param.xri_used; + if (mvpi) + *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + if (avpi) + *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - + phba->sli4_hba.max_cfg_param.vpi_used; + } else { + if (mrpi) + *mrpi = pmb->un.varRdConfig.max_rpi; + if (arpi) + *arpi = pmb->un.varRdConfig.avail_rpi; + if (mxri) + *mxri = pmb->un.varRdConfig.max_xri; + if (axri) + *axri = pmb->un.varRdConfig.avail_xri; + if (mvpi) + *mvpi = pmb->un.varRdConfig.max_vpi; + if (avpi) + *avpi = pmb->un.varRdConfig.avail_vpi; + } mempool_free(pmboxq, phba->mbox_mem_pool); return 1; @@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR, # lpfc_log_verbose: Only turn this flag on if you are willing to risk being # deluged with LOTS of information. # You can set a bit mask to record specific types of verbose messages: -# -# LOG_ELS 0x1 ELS events -# LOG_DISCOVERY 0x2 Link discovery events -# LOG_MBOX 0x4 Mailbox events -# LOG_INIT 0x8 Initialization events -# LOG_LINK_EVENT 0x10 Link events -# LOG_FCP 0x40 FCP traffic history -# LOG_NODE 0x80 Node table events -# LOG_BG 0x200 BlockBuard events -# LOG_MISC 0x400 Miscellaneous events -# LOG_SLI 0x800 SLI events -# LOG_FCP_ERROR 0x1000 Only log FCP errors -# LOG_LIBDFC 0x2000 LIBDFC events -# LOG_ALL_MSG 0xffff LOG all messages +# See lpfc_logmsg.h for definitions. */ -LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff, +LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff, "Verbose logging bit-mask"); /* @@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6) static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR, lpfc_topology_show, lpfc_topology_store); +/** + * lpfc_static_vport_show - Read callback function for + * lpfc_static_vport sysfs file. + * @dev: Pointer to class device object. + * @attr: device attribute structure. + * @buf: Data buffer. + * + * This function is the read callback function for the + * lpfc_static_vport sysfs file. The lpfc_static_vport + * sysfs file reports whether the vport is a static vport. + **/ +static ssize_t +lpfc_static_vport_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + if (vport->vport_flag & STATIC_VPORT) + sprintf(buf, "1\n"); + else + sprintf(buf, "0\n"); + + return strlen(buf); +} + +/* + * Sysfs attribute to report whether the vport is a static vport.
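+ * (reads back 1 when the vport's STATIC_VPORT flag is set, 0 otherwise).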
+ */ +static DEVICE_ATTR(lpfc_static_vport, S_IRUGO, + lpfc_static_vport_show, NULL); /** * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file @@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, if (vports == NULL) return -ENOMEM; - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(v_shost->host_lock); /* Block and reset data collection */ @@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, phba->bucket_base = base; phba->bucket_step = step; - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); /* Unblock data collection */ @@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, if (vports == NULL) return -ENOMEM; - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->stat_data_blocked = 1; @@ -2844,15 +2885,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, /* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature -# 0 = MSI disabled +# 0 = MSI disabled (default) # 1 = MSI enabled -# 2 = MSI-X enabled (default) -# Value range is [0,2]. Default value is 2. +# 2 = MSI-X enabled +# Value range is [0,2]. Default value is 0. */ -LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " +LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); /* +# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second +# +# Value range is [636,651042]. Default value is 10000. +*/ +LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, + "Set the maximum number of fast-path FCP interrupts per second"); + +/* +# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues +# +# Value range is [1,31]. Default value is 4. +*/ +LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, + "Set the number of fast-path FCP work queues, if possible"); + +/* +# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues +# +# Value range is [1,7]. Default value is 1. +*/ +LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, + "Set the number of fast-path FCP event queues, if possible"); + +/* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled # 1 = HBA resets enabled (default) @@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); */ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); +/* +# lpfc_enable_fip: When set, FIP is required to start discovery. If not +# set, the driver will add an FCF record manually if the port has no +# FCF records available and start discovery. +# Value range is [0,1]. 
Default value is 0 (disabled) +*/ +LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery"); + /* # lpfc_prot_mask: i @@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, + &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_ack0, @@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, + &dev_attr_lpfc_fcp_imax, + &dev_attr_lpfc_fcp_wq_count, + &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, @@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_lun_queue_depth, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, + &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_hba_queue_depth, &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_restrict_login, @@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_enable_da_id, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, + &dev_attr_lpfc_static_vport, NULL, }; @@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, } } - memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, + memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off, buf, count); phba->sysfs_mbox.offset = off + count; @@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int rc; + MAILBOX_t *pmb; if (off > MAILBOX_CMD_SIZE) return -ERANGE; @@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (off == 0 && phba->sysfs_mbox.state == SMBOX_WRITING && phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { - - switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { + pmb = &phba->sysfs_mbox.mbox->u.mb; + switch (pmb->mbxCommand) { /* Offline only */ case MBX_INIT_LINK: case MBX_DOWN_LINK: @@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (!(vport->fc_flag & FC_OFFLINE_MODE)) { printk(KERN_WARNING "mbox_read:Command 0x%x " "is illegal in on-line state\n", - phba->sysfs_mbox.mbox->mb.mbxCommand); + pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; @@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, case MBX_CONFIG_PORT: case MBX_RUN_BIU_DIAG: printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", - phba->sysfs_mbox.mbox->mb.mbxCommand); + pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; default: printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", - phba->sysfs_mbox.mbox->mb.mbxCommand); + pmb->mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; @@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, * or RESTART mailbox commands until the HBA is restarted.
*/ if (phba->pport->stopped && - phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && - phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && - phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && - phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) + pmb->mbxCommand != MBX_DUMP_MEMORY && + pmb->mbxCommand != MBX_RESTART && + pmb->mbxCommand != MBX_WRITE_VPARMS && + pmb->mbxCommand != MBX_WRITE_WWN) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "1259 mbox: Issued mailbox cmd " "0x%x while in stopped state.\n", - phba->sysfs_mbox.mbox->mb.mbxCommand); + pmb->mbxCommand); phba->sysfs_mbox.mbox->vport = vport; @@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, } if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ + (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox (phba, @@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, - lpfc_mbox_tmo_val(phba, - phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); + lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ); spin_lock_irq(&phba->hbalock); } @@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, return -EAGAIN; } - memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); + memcpy(buf, (uint8_t *) pmb + off, count); phba->sysfs_mbox.offset = off + count; @@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost) case LA_8GHZ_LINK: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; + case LA_10GHZ_LINK: + fc_host_speed(shost) = FC_PORTSPEED_10GBIT; + break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; @@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost) */ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || - (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) + (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) return NULL; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) @@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost) return NULL; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmb = &pmboxq->mb; + pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost) pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) return; memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); - pmb = &pmboxq->mb; + pmb = &pmboxq->u.mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmb->un.varWords[0] = 0x1; /* reset request */ pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3795,7 +3877,7
@@ lpfc_reset_stats(struct Scsi_Host *shost) pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport) lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); } +/** + * lpfc_hba_log_verbose_init - Set hba's log verbose level + * @phba: Pointer to lpfc_hba struct. + * + * This function is called by the lpfc_get_cfgparam() routine to set the + * module lpfc_log_verbose into the @phba cfg_log_verbose for use with + * log messages according to the module's lpfc_log_verbose parameter setting + * before any hba port or vport is created. + **/ +static void +lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose) +{ + phba->cfg_log_verbose = verbose; +} + struct fc_function_template lpfc_transport_functions = { /* fixed attributes the driver supports */ .show_host_node_name = 1, @@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_poll_tmo_init(phba, lpfc_poll_tmo); lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_use_msi_init(phba, lpfc_use_msi); + lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); + lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_enable_bg_init(phba, lpfc_enable_bg); @@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt); - /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size - * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. - */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); - - if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); - } - - /* Also reinitialize the host templates with new values.
*/ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); + lpfc_enable_fip_init(phba, lpfc_enable_fip); + lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); + return; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index f88ce3f26190..d2a922997c0f 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); struct fc_rport; void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); +int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); @@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, - LPFC_MBOXQ_t *, uint32_t); +int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, + LPFC_MBOXQ_t *, uint32_t); void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); -void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); +void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void lpfc_cleanup_rpis(struct lpfc_vport *, int); int lpfc_linkdown(struct lpfc_hba *); +void lpfc_linkdown_port(struct lpfc_vport *); void lpfc_port_link_failure(struct lpfc_vport *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, @@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); +int lpfc_issue_fabric_reglogin(struct lpfc_vport *); int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, @@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); void lpfc_offline_prep(struct lpfc_hba *); void lpfc_offline(struct lpfc_hba *); +void 
lpfc_reset_hba(struct lpfc_hba *); int lpfc_sli_setup(struct lpfc_hba *); int lpfc_sli_queue_setup(struct lpfc_hba *); void lpfc_handle_eratt(struct lpfc_hba *); void lpfc_handle_latt(struct lpfc_hba *); -irqreturn_t lpfc_intr_handler(int, void *); -irqreturn_t lpfc_sp_intr_handler(int, void *); -irqreturn_t lpfc_fp_intr_handler(int, void *); +irqreturn_t lpfc_sli_intr_handler(int, void *); +irqreturn_t lpfc_sli_sp_intr_handler(int, void *); +irqreturn_t lpfc_sli_fp_intr_handler(int, void *); +irqreturn_t lpfc_sli4_intr_handler(int, void *); +irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); +irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); @@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); +void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_mbox_dev_check(struct lpfc_hba *); int lpfc_mbox_tmo_val(struct lpfc_hba *, int); +void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); +void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); +void lpfc_init_vpi(struct lpfcMboxq *, uint16_t); +void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); +void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); +void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, uint32_t , LPFC_MBOXQ_t *); struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); +struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); +void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); +void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, + uint16_t); +void lpfc_unregister_unused_fcf(struct lpfc_hba *); -int lpfc_mem_alloc(struct lpfc_hba *); +int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); +void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_stop_vport_timers(struct lpfc_vport *); void lpfc_poll_timeout(unsigned long ptr); @@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t); +void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_reset_barrier(struct lpfc_hba * phba); int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); @@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *); int lpfc_sli_hba_down(struct lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); -int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); +void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); int lpfc_sli_check_eratt(struct lpfc_hba *); -int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, +void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); +int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); -int 
lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, +int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, uint32_t); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); @@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); -int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, +int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, struct lpfc_iocbq *, struct lpfc_iocbq *, uint32_t); void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, @@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); const char* lpfc_info(struct Scsi_Host *); int lpfc_scan_finished(struct Scsi_Host *, unsigned long); +int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t); +int lpfc_api_table_setup(struct lpfc_hba *, uint8_t); + void lpfc_get_cfgparam(struct lpfc_hba *); void lpfc_get_vport_cfgparam(struct lpfc_vport *); int lpfc_alloc_sysfs_attr(struct lpfc_vport *); @@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); +void lpfc_create_static_vport(struct lpfc_hba *); +void lpfc_stop_hba_timers(struct lpfc_hba *); +void lpfc_stop_port(struct lpfc_hba *); +void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); +int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); +void lpfc_start_fdiscs(struct lpfc_hba *phba); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 #define HBA_EVENT_LINK_DOWN 3 + diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 896c7b0351e5..1dbccfd3d022 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -32,8 +32,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, uint32_t tmo, uint8_t retry) { struct lpfc_hba *phba = vport->phba; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; IOCB_t *icmd; struct lpfc_iocbq *geniocb; int rc; @@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; geniocb->vport = vport; geniocb->retry = retry; - rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0); if (rc == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, geniocb); @@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) case LA_8GHZ_LINK: ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; break; + case LA_10GHZ_LINK: + ae->un.PortSpeed = HBA_PORTSPEED_10GBIT; + break; default: ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN; @@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) uint8_t *fwname; if (vp->rev.rBit) { - if (psli->sli_flag & LPFC_SLI2_ACTIVE) + if (psli->sli_flag & LPFC_SLI_ACTIVE) rev = vp->rev.sli2FwRev; else rev = vp->rev.sli1FwRev; @@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) } b4 = (rev & 0x0000000f); - if (psli->sli_flag & LPFC_SLI2_ACTIVE) + if (psli->sli_flag & LPFC_SLI_ACTIVE) fwname = vp->rev.sli2FwName; else fwname = vp->rev.sli1FwName; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 52be5644e07a..2b02b1fb39a0 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2008 Emulex. All rights reserved. * + * Copyright (C) 2007-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -33,8 +33,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) struct lpfc_dmabuf *d_buf; struct hbq_dmabuf *hbq_buf; + if (phba->sli_rev != 3) + return 0; cnt = LPFC_HBQINFO_SIZE; spin_lock_irq(&phba->hbalock); @@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) pring->next_cmdidx, pring->local_getidx, pring->flag, pgpp->rspPutInx, pring->numRiocb); } - word0 = readl(phba->HAregaddr); - word1 = readl(phba->CAregaddr); - word2 = readl(phba->HSregaddr); - word3 = readl(phba->HCregaddr); - len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", - word0, word1, word2, word3); + + if (phba->sli_rev <= LPFC_SLI_REV3) { + word0 = readl(phba->HAregaddr); + word1 = readl(phba->CAregaddr); + word2 = readl(phba->HSregaddr); + word3 = readl(phba->HCregaddr); + len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x " + "HC:%08x\n", word0, word1, word2, word3); + } spin_unlock_irq(&phba->hbalock); return len; } diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index ffd108972072..1142070e9484 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -135,6 +135,7 @@ struct lpfc_nodelist { #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ +#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */ /* ndlp usage management macros */ #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b8b34cf5c3d2..6bdeb14878a2 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -28,8 +28,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) uint32_t ha_copy; if (vport->port_state >= LPFC_VPORT_READY || - phba->link_state == LPFC_LINK_DOWN) + phba->link_state == LPFC_LINK_DOWN || + phba->sli_rev > LPFC_SLI_REV3) return 0; /* Read the HBA Host Attention Register */ @@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.myID = vport->fc_myDID; /* For ELS_REQUEST64_CR, use the VPI by default */ - icmd->ulpContext = vport->vpi; + icmd->ulpContext = vport->vpi + phba->vpi_base; icmd->ulpCt_h = 0; /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ if (elscmd == ELS_CMD_ECHO) @@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit: * 0 - successfully issued fabric registration login for @vport * -ENXIO -- failed to issue fabric registration login for @vport **/ -static int +int lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; @@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) err = 4; goto fail; } - rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, - 0); + rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0); if (rc) { err = 5; goto fail_free_mbox; @@ -386,6 +388,75 @@ fail: } /** + * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login + * @vport: pointer to a host virtual N_Port data structure. + * + * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for + * the @vport. This mailbox command is necessary for FCoE only. + * + * Return code + * 0 - successfully issued REG_VFI for @vport + * A failure code otherwise. 
+ **/ +static int +lpfc_issue_reg_vfi(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mboxq; + struct lpfc_nodelist *ndlp; + struct serv_parm *sp; + struct lpfc_dmabuf *dmabuf; + int rc = 0; + + sp = &phba->fc_fabparam; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + rc = -ENODEV; + goto fail; + } + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) { + rc = -ENOMEM; + goto fail; + } + dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); + if (!dmabuf->virt) { + rc = -ENOMEM; + goto fail_free_dmabuf; + } + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + rc = -ENOMEM; + goto fail_free_coherent; + } + vport->port_state = LPFC_FABRIC_CFG_LINK; + memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); + lpfc_reg_vfi(mboxq, vport, dmabuf->phys); + mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; + mboxq->vport = vport; + mboxq->context1 = dmabuf; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + rc = -ENXIO; + goto fail_free_mbox; + } + return 0; + +fail_free_mbox: + mempool_free(mboxq, phba->mbox_mem_pool); +fail_free_coherent: + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); +fail_free_dmabuf: + kfree(dmabuf); +fail: + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0289 Issue Register VFI failed: Err %d\n", rc); + return rc; +} + +/** * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. @@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } } - lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); - - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && - vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { - lpfc_register_new_vport(phba, vport, ndlp); - return 0; + if (phba->sli_rev < LPFC_SLI_REV4) { + lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && + vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) + lpfc_register_new_vport(phba, vport, ndlp); + else + lpfc_issue_fabric_reglogin(vport); + } else { + ndlp->nlp_type |= NLP_FABRIC; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + if (vport->vfi_state & LPFC_VFI_REGISTERED) { + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } else + lpfc_issue_reg_vfi(vport); } - lpfc_issue_fabric_reglogin(vport); return 0; } - /** * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port * @vport: pointer to a host virtual N_Port data structure. 
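The lpfc_issue_reg_vfi() routine above acquires three resources in order (the lpfc_dmabuf wrapper, the DMA buffer behind it, and the mailbox object) and releases them in reverse order through dedicated fail labels. Below is a minimal user-space sketch of that goto-unwind pattern; the demo_* names are illustrative stand-ins, not lpfc symbols.

#include <errno.h>
#include <stdlib.h>

/* Illustrative stand-ins for the driver's buffer and mailbox objects. */
struct demo_dmabuf { void *virt; };
struct demo_mbox { struct demo_dmabuf *ctx; };

/* Acquire resources in order and unwind in reverse on failure, mirroring
 * the fail_free_mbox/fail_free_coherent/fail_free_dmabuf labels above. */
static int demo_issue_cmd(void)
{
	struct demo_dmabuf *dmabuf;
	struct demo_mbox *mbox;
	int rc;

	dmabuf = calloc(1, sizeof(*dmabuf));
	if (!dmabuf) {
		rc = -ENOMEM;
		goto fail;
	}
	dmabuf->virt = malloc(64);		/* stands in for lpfc_mbuf_alloc() */
	if (!dmabuf->virt) {
		rc = -ENOMEM;
		goto fail_free_dmabuf;
	}
	mbox = calloc(1, sizeof(*mbox));	/* stands in for mempool_alloc() */
	if (!mbox) {
		rc = -ENOMEM;
		goto fail_free_virt;
	}
	mbox->ctx = dmabuf;
	/* In the driver, a successful submit hands ownership of mbox and
	 * dmabuf to the completion handler; the demo simply frees them. */
	free(mbox);
	free(dmabuf->virt);
	free(dmabuf);
	return 0;

fail_free_virt:
	free(dmabuf->virt);
fail_free_dmabuf:
	free(dmabuf);
fail:
	return rc;
}

int main(void)
{
	return demo_issue_cmd() ? 1 : 0;
}

The value of the pattern is that each failure point needs exactly one jump target, so the unwind order can never drift out of sync with the acquisition order.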
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + if (phba->sli_rev == LPFC_SLI_REV4) { + elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); + elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); + /* FLOGI needs to be 3 for WQE FCFI */ + /* Set the fcfi to the fcfi we registered with */ + elsiocb->iocb.ulpContext = phba->fcf.fcfi; + } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { sp->cmn.request_multiple_Nport = 1; - /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ icmd->ulpCt_h = 1; icmd->ulpCt_l = 0; @@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport) if (!ndlp) return 0; lpfc_nlp_init(vport, ndlp, Fabric_DID); + /* Set the node type */ + ndlp->nlp_type |= NLP_FABRIC; /* Put ndlp onto node list */ lpfc_enqueue_node(vport, ndlp); } else if (!NLP_CHK_NODE_ACT(ndlp)) { @@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) IOCB_t *icmd; struct lpfc_nodelist *ndlp; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int ret; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ ndlp = lpfc_findnode_did(vport, did); if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) @@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) phba->fc_stat.elsXmitPLOGI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; - ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (ret == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, PRLI *npr; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; - struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_PRLI); @@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_PRLI_SND; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_PRLI_SND; spin_unlock_irq(shost->host_lock); @@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport) * and continue discovery. 
*/ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - !(vport->fc_flag & FC_RSCN_MODE)) { + !(vport->fc_flag & FC_RSCN_MODE) && + (phba->sli_rev < LPFC_SLI_REV4)) { lpfc_issue_reg_vpi(phba, vport); return; } @@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ADISC *ap; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; uint8_t *pcmd; uint16_t cmdsize; @@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_ADISC_SND; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_ADISC_SND; spin_unlock_irq(shost->host_lock); @@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; - struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; - psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; - spin_lock_irq(shost->host_lock); if (ndlp->nlp_flag & NLP_LOGO_SND) { spin_unlock_irq(shost->host_lock); @@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_SND; spin_unlock_irq(shost->host_lock); - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { spin_lock_irq(shost->host_lock); @@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; struct lpfc_nodelist *ndlp; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = (sizeof(uint32_t) + sizeof(SCR)); ndlp = lpfc_findnode_did(vport, nportid); @@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) phba->fc_stat.elsXmitSCR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { /* The additional lpfc_nlp_put will cause the following * lpfc_els_free_iocb routine to trigger the rlease of * the node. 
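A change repeated throughout this file replaces the struct lpfc_sli_ring pointer argument of lpfc_sli_issue_iocb() with a ring number such as LPFC_ELS_RING, so each SLI revision can resolve the number to its own queue object internally. The sketch below models that indirection in plain C; the demo_* names and ring indices are made up for illustration and are not driver symbols.

#include <stdio.h>

/* Made-up ring indices standing in for LPFC_ELS_RING and friends. */
enum { DEMO_ELS_RING, DEMO_EXTRA_RING, DEMO_FCP_RING, DEMO_NUM_RINGS };

struct demo_ring { const char *name; int txq_cnt; };

struct demo_hba {
	int sli_rev;
	struct demo_ring rings[DEMO_NUM_RINGS];
};

/* Callers pass a ring number; the transport resolves it internally, which
 * is what lets an SLI-4 implementation map the same number to a work
 * queue instead of a legacy ring. */
static int demo_issue_iocb(struct demo_hba *hba, int ring_number)
{
	struct demo_ring *pring = &hba->rings[ring_number];

	pring->txq_cnt++;
	printf("queued iocb on %s ring (sli rev %d)\n",
	       pring->name, hba->sli_rev);
	return 0;
}

int main(void)
{
	struct demo_hba hba = {
		.sli_rev = 3,
		.rings = {
			[DEMO_ELS_RING]   = { "els", 0 },
			[DEMO_EXTRA_RING] = { "extra", 0 },
			[DEMO_FCP_RING]   = { "fcp", 0 },
		},
	};

	return demo_issue_iocb(&hba, DEMO_ELS_RING);
}

Passing an index rather than a pointer also removes the need for each caller to fetch &psli->ring[LPFC_ELS_RING] itself, which is why so many pring locals disappear in the hunks above.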
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; FARP *fp; uint8_t *pcmd; @@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) struct lpfc_nodelist *ndlp; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = (sizeof(uint32_t) + sizeof(FARP)); ndlp = lpfc_findnode_did(vport, nportid); @@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) phba->fc_stat.elsXmitFARPR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { /* The additional lpfc_nlp_put will cause the following * lpfc_els_free_iocb routine to trigger the release of * the node. @@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; + /* + * This routine is used to register and unregister in previous SLI + * modes. + */ + if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && + (phba->sli_rev == LPFC_SLI_REV4)) + lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); + pmb->context1 = NULL; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); @@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) */ lpfc_nlp_not_used(ndlp); } + return; } @@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; @@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, ELS_PKT *els_pkt_ptr; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ oldcmd = &oldiocb->iocb; switch (flag) { @@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } phba->fc_stat.elsXmitACC++; - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ - cmdsize = 2 * sizeof(uint32_t); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); @@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, phba->fc_stat.elsXmitLSRJT++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; ADISC *ap; IOCB_t *icmd, *oldcmd; struct lpfc_iocbq *elsiocb; @@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct 
lpfc_vport *vport, struct lpfc_iocbq *oldiocb, phba->fc_stat.elsXmitACC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = sizeof(uint32_t) + sizeof(PRLI); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, @@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, phba->fc_stat.elsXmitACC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, RNID *rn; IOCB_t *icmd, *oldcmd; struct lpfc_iocbq *elsiocb; - struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; - pring = &psli->ring[LPFC_ELS_RING]; - cmdsize = sizeof(uint32_t) + sizeof(uint32_t) + (2 * sizeof(struct lpfc_name)); if (format) @@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, * it could be freed */ - rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) payload_len -= sizeof(uint32_t); switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { case RSCN_ADDRESS_FORMAT_PORT: - if (ns_did.un.word == rscn_did.un.word) + if ((ns_did.un.b.domain == rscn_did.un.b.domain) + && (ns_did.un.b.area == rscn_did.un.b.area) + && (ns_did.un.b.id == rscn_did.un.b.id)) goto return_did_out; break; case RSCN_ADDRESS_FORMAT_AREA: @@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_init_link(phba, mbox, phba->cfg_topology, phba->cfg_link_speed); - mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; + mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); @@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, static void lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; MAILBOX_t *mb; IOCB_t *icmd; RPS_RSP *rps_rsp; @@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) uint16_t xri, status; uint32_t cmdsize; - mb = &pmb->mb; + mb = &pmb->u.mb; ndlp = (struct lpfc_nodelist *) pmb->context2; xri = (uint16_t) ((unsigned long)(pmb->context1)); @@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_rpi); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) lpfc_els_free_iocb(phba, 
elsiocb); return; } @@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, IOCB_t *icmd, *oldcmd; RPL_RSP rpl_rsp; struct lpfc_iocbq *elsiocb; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; uint8_t *pcmd; elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, @@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, ndlp->nlp_rpi); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; } @@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } else { /* FAN verified - skip FLOGI */ vport->fc_myDID = vport->fc_prevDID; - lpfc_issue_fabric_reglogin(vport); + if (phba->sli_rev < LPFC_SLI_REV4) + lpfc_issue_fabric_reglogin(vport); + else + lpfc_issue_reg_vfi(vport); } } return 0; @@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, dropit: if (vport && !(vport->load_flag & FC_UNLOADING)) - lpfc_printf_log(phba, KERN_ERR, LOG_ELS, - "(%d):0111 Dropping received ELS cmd " + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0111 Dropping received ELS cmd " "Data: x%x x%x x%x\n", - vport->vpi, icmd->ulpStatus, - icmd->un.ulpWord[4], icmd->ulpTimeout); + icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); phba->fc_stat.elsRcvDrop++; } @@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { if (icmd->unsli3.rcvsli3.vpi == 0xffff) vport = phba->pport; - else { - uint16_t vpi = icmd->unsli3.rcvsli3.vpi; - vport = lpfc_find_vport_by_vpid(phba, vpi); - } + else + vport = lpfc_find_vport_by_vpid(phba, + icmd->unsli3.rcvsli3.vpi - phba->vpi_base); } /* If there are no BDEs associated * with this IOCB, there is nothing to do. 
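The unsolicited-event fix just above converts the absolute VPI reported by the firmware back into a driver-relative index by subtracting phba->vpi_base, mirroring the earlier change that adds vpi_base when building an outgoing ELS iocb. A small model of that round trip follows, with made-up values; the demo_* names are not driver symbols.

#include <assert.h>

/* Minimal model of the vpi <-> vpi_base translation; the base value here
 * is arbitrary, chosen only for the example. */
struct demo_hba { unsigned int vpi_base; };

static unsigned int demo_vpi_to_wire(const struct demo_hba *hba,
				     unsigned int vpi)
{
	return vpi + hba->vpi_base;	/* as when preparing an ELS iocb */
}

static unsigned int demo_wire_to_vpi(const struct demo_hba *hba,
				     unsigned int wire_vpi)
{
	return wire_vpi - hba->vpi_base; /* as in the unsolicited-event path */
}

int main(void)
{
	struct demo_hba hba = { .vpi_base = 100 };

	/* round trip: driver vport index 3 goes on the wire as 103 and back */
	assert(demo_wire_to_vpi(&hba, demo_vpi_to_wire(&hba, 3)) == 3);
	return 0;
}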
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; @@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } else { if (vport == phba->pport) - lpfc_issue_fabric_reglogin(vport); + if (phba->sli_rev < LPFC_SLI_REV4) + lpfc_issue_fabric_reglogin(vport); + else + lpfc_issue_reg_vfi(vport); else lpfc_do_scr_ns_plogi(phba, vport); } @@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { - lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); + lpfc_reg_vpi(vport, mbox); mbox->vport = vport; mbox->context2 = lpfc_nlp_get(ndlp); mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; @@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; @@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_SND; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == + IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; spin_unlock_irq(shost->host_lock); @@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) struct lpfc_iocbq *iocb; unsigned long iflags; int ret; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; IOCB_t *cmd; repeat: @@ -6248,7 +6319,7 @@ repeat: "Fabric sched1: ste:x%x", iocb->vport->port_state, 0, 0); - ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); if (ret == IOCB_ERROR) { iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; @@ -6394,7 +6465,6 @@ static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) { unsigned long iflags; - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; int ready; int ret; @@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) "Fabric sched2: ste:x%x", iocb->vport->port_state, 0, 0); - ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); if (ret == IOCB_ERROR) { iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; @@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } + +/** + * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the els xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 slow-path + * ELS aborted xri. 
+ **/ +void +lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); + list_for_each_entry_safe(sglq_entry, sglq_next, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { + if (sglq_entry->sli4_xritag == xri) { + list_del(&sglq_entry->list); + spin_unlock_irqrestore( + &phba->sli4_hba.abts_sgl_list_lock, + iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + + list_add_tail(&sglq_entry->list, + &phba->sli4_hba.lpfc_sgl_list); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + } + } + spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); +} diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index e764ce0bf704..35c41ae75be2 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -29,10 +29,12 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" @@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); + + lpfc_unregister_unused_fcf(phba); } /** @@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) { ret = kzalloc(sizeof(struct lpfc_fast_path_event), GFP_ATOMIC); - if (ret) + if (ret) { atomic_inc(&phba->fast_event_count); - INIT_LIST_HEAD(&ret->work_evt.evt_listp); - ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; + INIT_LIST_HEAD(&ret->work_evt.evt_listp); + ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; + } return ret; } @@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba) phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); + /* First, try to post the next mailbox command to SLI4 device */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) + lpfc_sli4_post_async_mbox(phba); + if (ha_copy & HA_ERATT) /* Handle the error attention event */ lpfc_handle_eratt(phba); @@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba) if (ha_copy & HA_LATT) lpfc_handle_latt(phba); + /* Process SLI4 events */ + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { + if (phba->hba_flag & FCP_XRI_ABORT_EVENT) + lpfc_sli4_fcp_xri_abort_event_proc(phba); + if (phba->hba_flag & ELS_XRI_ABORT_EVENT) + lpfc_sli4_els_xri_abort_event_proc(phba); + if (phba->hba_flag & ASYNC_EVENT) + lpfc_sli4_async_event_proc(phba); + if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; + spin_unlock_irq(&phba->hbalock); + lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); + } + if (phba->hba_flag & HBA_RECEIVE_BUFFER) + lpfc_sli4_handle_received_buffer(phba); + } + vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi; i++) { + 
for (i = 0; i <= phba->max_vports; i++) { /* * We could have no vports in array if unloading, so if * this happens then just use the pport @@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba) /* * Turn on Ring interrupts */ - spin_lock_irq(&phba->hbalock); - control = readl(phba->HCregaddr); - if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { - lpfc_debugfs_slow_ring_trc(phba, - "WRK Enable ring: cntl:x%x hacopy:x%x", - control, ha_copy, 0); - - control |= (HC_R0INT_ENA << LPFC_ELS_RING); - writel(control, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - } - else { - lpfc_debugfs_slow_ring_trc(phba, - "WRK Ring ok: cntl:x%x hacopy:x%x", - control, ha_copy, 0); + if (phba->sli_rev <= LPFC_SLI_REV3) { + spin_lock_irq(&phba->hbalock); + control = readl(phba->HCregaddr); + if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { + lpfc_debugfs_slow_ring_trc(phba, + "WRK Enable ring: cntl:x%x hacopy:x%x", + control, ha_copy, 0); + + control |= (HC_R0INT_ENA << LPFC_ELS_RING); + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } else { + lpfc_debugfs_slow_ring_trc(phba, + "WRK Ring ok: cntl:x%x hacopy:x%x", + control, ha_copy, 0); + } + spin_unlock_irq(&phba->hbalock); } - spin_unlock_irq(&phba->hbalock); } lpfc_work_list_done(phba); } @@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport) lpfc_can_disctmo(vport); } -static void +void lpfc_linkdown_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba) if (phba->link_state == LPFC_LINK_DOWN) return 0; spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); if (phba->link_state > LPFC_LINK_DOWN) { phba->link_state = LPFC_LINK_DOWN; phba->pport->fc_flag &= ~FC_LBIT; @@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { /* Issue a LINK DOWN event to all nodes */ lpfc_linkdown_port(vports[i]); } @@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && + (phba->sli_rev < LPFC_SLI_REV4)) lpfc_issue_clear_la(phba, phba->pport); return 0; @@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_sli *psli = &phba->sli; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; uint32_t control; /* Since we don't do discovery right now, turn these off here */ @@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; - if (pmb->mb.mbxStatus) + if (pmb->u.mb.mbxStatus) goto out; mempool_free(pmb, phba->mbox_mem_pool); @@ -945,7 +975,7 @@ out: lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0306 CONFIG_LINK mbxStatus error x%x " "HBA state x%x\n", - pmb->mb.mbxStatus, vport->port_state); + pmb->u.mb.mbxStatus, vport->port_state); mempool_free(pmb, phba->mbox_mem_pool); lpfc_linkdown(phba); @@ 
-959,9 +989,592 @@ out: } static void +lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + unsigned long flags; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + "2017 REG_FCFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + mempool_free(mboxq, phba->mbox_mem_pool); + return; + } + + /* Start FCoE discovery by sending a FLOGI. */ + phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); + /* Set the FCFI registered flag */ + spin_lock_irqsave(&phba->hbalock, flags); + phba->fcf.fcf_flag |= FCF_REGISTERED; + spin_unlock_irqrestore(&phba->hbalock, flags); + if (vport->port_state != LPFC_FLOGI) { + spin_lock_irqsave(&phba->hbalock, flags); + phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); + spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_initial_flogi(vport); + } + + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_fab_name_match - Check if the fcf fabric name matches. + * @fab_name: pointer to fabric name. + * @new_fcf_record: pointer to fcf record. + * + * This routine compares the fcf record's fabric name with the provided + * fabric name. If the fabric names are identical this function + * returns 1, else it returns 0. + **/ +static uint32_t +lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) +{ + if ((fab_name[0] == + bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && + (fab_name[1] == + bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && + (fab_name[2] == + bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) && + (fab_name[3] == + bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) && + (fab_name[4] == + bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) && + (fab_name[5] == + bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) && + (fab_name[6] == + bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) && + (fab_name[7] == + bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))) + return 1; + else + return 0; +} + +/** + * lpfc_mac_addr_match - Check if the fcf mac address matches. + * @phba: pointer to lpfc hba data structure. + * @new_fcf_record: pointer to fcf record. + * + * This routine compares the fcf record's mac address with the HBA's + * FCF mac address. If the mac addresses are identical this function + * returns 1, else it returns 0. + **/ +static uint32_t +lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) +{ + if ((phba->fcf.mac_addr[0] == + bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && + (phba->fcf.mac_addr[1] == + bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) && + (phba->fcf.mac_addr[2] == + bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) && + (phba->fcf.mac_addr[3] == + bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) && + (phba->fcf.mac_addr[4] == + bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) && + (phba->fcf.mac_addr[5] == + bf_get(lpfc_fcf_record_mac_5, new_fcf_record))) + return 1; + else + return 0; +} + +/** + * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. + * @phba: pointer to lpfc hba data structure. + * @new_fcf_record: pointer to fcf record. + * + * This routine copies the FCF information from the FCF + * record to the lpfc_hba data structure.
+ **/ +static void +lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) +{ + phba->fcf.fabric_name[0] = + bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); + phba->fcf.fabric_name[1] = + bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); + phba->fcf.fabric_name[2] = + bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); + phba->fcf.fabric_name[3] = + bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); + phba->fcf.fabric_name[4] = + bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); + phba->fcf.fabric_name[5] = + bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); + phba->fcf.fabric_name[6] = + bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); + phba->fcf.fabric_name[7] = + bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); + phba->fcf.mac_addr[0] = + bf_get(lpfc_fcf_record_mac_0, new_fcf_record); + phba->fcf.mac_addr[1] = + bf_get(lpfc_fcf_record_mac_1, new_fcf_record); + phba->fcf.mac_addr[2] = + bf_get(lpfc_fcf_record_mac_2, new_fcf_record); + phba->fcf.mac_addr[3] = + bf_get(lpfc_fcf_record_mac_3, new_fcf_record); + phba->fcf.mac_addr[4] = + bf_get(lpfc_fcf_record_mac_4, new_fcf_record); + phba->fcf.mac_addr[5] = + bf_get(lpfc_fcf_record_mac_5, new_fcf_record); + phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + phba->fcf.priority = new_fcf_record->fip_priority; +} + +/** + * lpfc_register_fcf - Register the FCF with the hba. + * @phba: pointer to lpfc hba data structure. + * + * This routine issues a register fcfi mailbox command to register + * the fcf with the HBA. + **/ +static void +lpfc_register_fcf(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *fcf_mbxq; + int rc; + unsigned long flags; + + spin_lock_irqsave(&phba->hbalock, flags); + + /* If the FCF is not available do nothing. */ + if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + return; + } + + /* The FCF is already registered, start discovery */ + if (phba->fcf.fcf_flag & FCF_REGISTERED) { + phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); + spin_unlock_irqrestore(&phba->hbalock, flags); + if (phba->pport->port_state != LPFC_FLOGI) + lpfc_initial_flogi(phba->pport); + return; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + + fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!fcf_mbxq) + return; + + lpfc_reg_fcfi(phba, fcf_mbxq); + fcf_mbxq->vport = phba->pport; + fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; + rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + mempool_free(fcf_mbxq, phba->mbox_mem_pool); + + return; +} + +/** + * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. + * @phba: pointer to lpfc hba data structure. + * @new_fcf_record: pointer to fcf record. + * @boot_flag: Indicates if this record is used by the boot bios. + * @addr_mode: The address mode to be used by this FCF + * + * This routine compares the fcf record with the connect list obtained from the + * config region to decide if this FCF can be used for SAN discovery. It returns + * 1 if this record can be used for SAN discovery, else it returns zero. If this FCF + * record can be used for SAN discovery, the boot_flag will indicate if this FCF + * is used by the boot bios and addr_mode will indicate the addressing mode to be + * used for this FCF when the function returns. + * If the FCF record needs to be used with a particular vlan id, the vlan id is + * set in vlan_id on return of the function.
If no VLAN tagging needs to + * be used with the FCF, vlan_id will be set to 0xFFFF. + **/ +static int +lpfc_match_fcf_conn_list(struct lpfc_hba *phba, + struct fcf_record *new_fcf_record, + uint32_t *boot_flag, uint32_t *addr_mode, + uint16_t *vlan_id) +{ + struct lpfc_fcf_conn_entry *conn_entry; + + if (!phba->cfg_enable_fip) { + *boot_flag = 0; + *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record); + if (phba->valid_vlan) + *vlan_id = phba->vlan_id; + else + *vlan_id = 0xFFFF; + return 1; + } + + /* + * If there are no FCF connection table entries, the driver connects to all + * FCFs. + */ + if (list_empty(&phba->fcf_conn_rec_list)) { + *boot_flag = 0; + *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record); + *vlan_id = 0xFFFF; + return 1; + } + + list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { + if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) + continue; + + if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && + !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, + new_fcf_record)) + continue; + + if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { + /* + * If the vlan bit map does not have the bit set for the + * vlan id to be used, then it is not a match. + */ + if (!(new_fcf_record->vlan_bitmap + [conn_entry->conn_rec.vlan_tag / 8] & + (1 << (conn_entry->conn_rec.vlan_tag % 8)))) + continue; + } + + /* + * Check if the connection record specifies a required + * addressing mode. + */ + if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { + + /* + * If SPMA is required but the FCF does not support it, continue. + */ + if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + !(bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record) & LPFC_FCF_SPMA)) + continue; + + /* + * If FPMA is required but the FCF does not support it, continue. + */ + if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + !(bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record) & LPFC_FCF_FPMA)) + continue; + } + + /* + * This fcf record matches the filtering criteria. + */ + if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) + *boot_flag = 1; + else + *boot_flag = 0; + + *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, + new_fcf_record); + /* + * If the user specified a required address mode, assign that + * address mode. + */ + if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) + *addr_mode = (conn_entry->conn_rec.flags & + FCFCNCT_AM_SPMA) ? + LPFC_FCF_SPMA : LPFC_FCF_FPMA; + /* + * If the user specified a preferred address mode, use that + * addr mode only if the FCF supports it. + */ + else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && + (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + (*addr_mode & LPFC_FCF_SPMA)) + *addr_mode = LPFC_FCF_SPMA; + else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && + (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && + !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && + (*addr_mode & LPFC_FCF_FPMA)) + *addr_mode = LPFC_FCF_FPMA; + /* + * If the user did not specify any addressing mode, use FPMA if + * possible, else use SPMA. + */ + else if (*addr_mode & LPFC_FCF_FPMA) + *addr_mode = LPFC_FCF_FPMA; + + if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) + *vlan_id = conn_entry->conn_rec.vlan_tag; + else + *vlan_id = 0xFFFF; + + return 1; + } + + return 0; +} + +/** + * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This function iterates through all the fcf records available in the + * HBA and chooses the optimal FCF record for discovery. After finding + * the FCF for discovery it registers the FCF record and kick starts + * discovery. + * If the FCF_IN_USE flag is set in the currently used FCF, the routine tries to + * use an FCF record which matches the fabric name and mac address of the + * currently used FCF record. + * If the driver supports only one FCF, it will try to use the FCF record + * used by the BOOT_BIOS. + */ +void +lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + void *virt_addr; + dma_addr_t phys_addr; + uint8_t *bytep; + struct lpfc_mbx_sge sge; + struct lpfc_mbx_read_fcf_tbl *read_fcf; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + struct fcf_record *new_fcf_record; + int rc; + uint32_t boot_flag, addr_mode; + uint32_t next_fcf_index; + unsigned long flags; + uint16_t vlan_id; + + /* Get the first SGE entry from the non-embedded DMA memory. This + * routine only uses a single SGE. + */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); + if (unlikely(!mboxq->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2524 Failed to get the non-embedded SGE " + "virtual address\n"); + goto out; + } + virt_addr = mboxq->sge_array->addr[0]; + + shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &shdr->response); + /* + * The FCF Record was read and there is no reason for the driver + * to maintain the FCF record data or memory. Instead, the driver + * just needs to keep track of which FCFIs can be used. + */ + if (shdr_status || shdr_add_status) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2521 READ_FCF_RECORD mailbox failed " + "with status x%x add_status x%x, mbx\n", + shdr_status, shdr_add_status); + goto out; + } + /* Interpreting the returned information of FCF records */ + read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; + lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, + sizeof(struct lpfc_mbx_read_fcf_tbl)); + next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); + + new_fcf_record = (struct fcf_record *)(virt_addr + + sizeof(struct lpfc_mbx_read_fcf_tbl)); + lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, + sizeof(struct fcf_record)); + bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, + &boot_flag, &addr_mode, + &vlan_id); + /* + * If the fcf record does not match the connect list entries, + * read the next entry. + */ + if (!rc) + goto read_next_fcf; + /* + * If this is not the first FCF discovery of the HBA, use the last + * FCF record for the discovery. + */ + spin_lock_irqsave(&phba->hbalock, flags); + if (phba->fcf.fcf_flag & FCF_IN_USE) { + if (lpfc_fab_name_match(phba->fcf.fabric_name, + new_fcf_record) && + lpfc_mac_addr_match(phba, new_fcf_record)) { + phba->fcf.fcf_flag |= FCF_AVAILABLE; + spin_unlock_irqrestore(&phba->hbalock, flags); + goto out; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + if (phba->fcf.fcf_flag & FCF_AVAILABLE) { + /* + * If the current FCF record does not have the boot flag + * set and the new fcf record has the boot flag set, use the + * new fcf record.
+ */ + if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { + /* Use this FCF record */ + lpfc_copy_fcf_record(phba, new_fcf_record); + phba->fcf.addr_mode = addr_mode; + phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; + if (vlan_id != 0xFFFF) { + phba->fcf.fcf_flag |= FCF_VALID_VLAN; + phba->fcf.vlan_id = vlan_id; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + /* + * If the current FCF record has the boot flag set and the + * new FCF record does not, read the next FCF record. + */ + if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + /* + * If there is a record with a lower priority value for + * the current FCF, use that record. + */ + if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record) + && (new_fcf_record->fip_priority < + phba->fcf.priority)) { + /* Use this FCF record */ + lpfc_copy_fcf_record(phba, new_fcf_record); + phba->fcf.addr_mode = addr_mode; + if (vlan_id != 0xFFFF) { + phba->fcf.fcf_flag |= FCF_VALID_VLAN; + phba->fcf.vlan_id = vlan_id; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + } + /* + * This is the first available FCF record, use this + * record. + */ + lpfc_copy_fcf_record(phba, new_fcf_record); + phba->fcf.addr_mode = addr_mode; + if (boot_flag) + phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; + phba->fcf.fcf_flag |= FCF_AVAILABLE; + if (vlan_id != 0xFFFF) { + phba->fcf.fcf_flag |= FCF_VALID_VLAN; + phba->fcf.vlan_id = vlan_id; + } + spin_unlock_irqrestore(&phba->hbalock, flags); + goto read_next_fcf; + +read_next_fcf: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) + lpfc_register_fcf(phba); + else + lpfc_sli4_read_fcf_record(phba, next_fcf_index); + return; + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + lpfc_register_fcf(phba); + + return; +} + +/** + * lpfc_start_fdiscs - send FDISCs for each vport on this port. + * @phba: pointer to lpfc hba data structure. + * + * This function loops through the list of vports on the @phba and issues an + * FDISC if possible.
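+ * A vport is skipped when it is the physical port; a vport without a
+ * valid vpi (vpi greater than max_vpi) is marked FC_VPORT_FAILED, on
+ * loop topology vports are marked FC_VPORT_LINKDOWN, and without NPIV
+ * fabric support they are marked FC_VPORT_NO_FABRIC_SUPP instead of
+ * being sent an FDISC.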
+ */ +void +lpfc_start_fdiscs(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + if (vports[i]->port_type == LPFC_PHYSICAL_PORT) + continue; + /* There are no vpi for this vport */ + if (vports[i]->vpi > phba->max_vpi) { + lpfc_vport_set_state(vports[i], + FC_VPORT_FAILED); + continue; + } + if (phba->fc_topology == TOPOLOGY_LOOP) { + lpfc_vport_set_state(vports[i], + FC_VPORT_LINKDOWN); + continue; + } + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) + lpfc_initial_fdisc(vports[i]); + else { + lpfc_vport_set_state(vports[i], + FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_vlog(vports[i], KERN_ERR, + LOG_ELS, + "0259 No NPIV " + "Fabric support\n"); + } + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + +void +lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_dmabuf *dmabuf = mboxq->context1; + struct lpfc_vport *vport = mboxq->vport; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + "2018 REG_VFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + if (phba->fc_topology == TOPOLOGY_LOOP) { + /* FLOGI failed, use loop map to make discovery list */ + lpfc_disc_list_loopmap(vport); + /* Start discovery */ + lpfc_disc_start(vport); + goto fail_free_mem; + } + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + goto fail_free_mem; + } + /* Mark the vport has registered with its VFI */ + vport->vfi_state |= LPFC_VFI_REGISTERED; + + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { + lpfc_start_fdiscs(phba); + lpfc_do_scr_ns_plogi(phba, vport); + } + +fail_free_mem: + mempool_free(mboxq, phba->mbox_mem_pool); + lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + return; +} + +static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; struct lpfc_vport *vport = pmb->vport; @@ -1012,13 +1625,13 @@ static void lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) { struct lpfc_vport *vport = phba->pport; - LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; + LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; int i; struct lpfc_dmabuf *mp; int rc; + struct fcf_record *fcf_record; sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); spin_lock_irq(&phba->hbalock); switch (la->UlnkSpeed) { @@ -1034,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) case LA_8GHZ_LINK: phba->fc_linkspeed = LA_8GHZ_LINK; break; + case LA_10GHZ_LINK: + phba->fc_linkspeed = LA_10GHZ_LINK; + break; default: phba->fc_linkspeed = LA_UNKNW_LINK; break; @@ -1115,22 +1731,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(sparam_mbox, phba->mbox_mem_pool); - if (cfglink_mbox) - mempool_free(cfglink_mbox, phba->mbox_mem_pool); goto out; } } - if (cfglink_mbox) { + if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { + cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!cfglink_mbox) + goto out; vport->port_state = LPFC_LOCAL_CFG_LINK; lpfc_config_link(phba, cfglink_mbox); cfglink_mbox->vport = vport; cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); - if (rc != 
MBX_NOT_FINISHED) - return; - mempool_free(cfglink_mbox, phba->mbox_mem_pool); + if (rc == MBX_NOT_FINISHED) { + mempool_free(cfglink_mbox, phba->mbox_mem_pool); + goto out; + } + } else { + /* + * Add the driver's default FCF record at FCF index 0 now. This + * is a phase 1 implementation that supports FCF index 0 and + * driver defaults. + */ + if (phba->cfg_enable_fip == 0) { + fcf_record = kzalloc(sizeof(struct fcf_record), + GFP_KERNEL); + if (unlikely(!fcf_record)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_MBOX | LOG_SLI, + "2554 Could not allocate memory for " + "fcf record\n"); + rc = -ENODEV; + goto out; + } + + lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, + LPFC_FCOE_FCF_DEF_INDEX); + rc = lpfc_sli4_add_fcf_record(phba, fcf_record); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, + LOG_MBOX | LOG_SLI, + "2013 Could not manually add FCF " + "record 0, status %d\n", rc); + rc = -ENODEV; + kfree(fcf_record); + goto out; + } + kfree(fcf_record); + } + /* + * The driver is expected to do FIP/FCF. Call the port + * and get the FCF Table. + */ + rc = lpfc_sli4_read_fcf_record(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) + goto out; + } + + return; out: lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, @@ -1147,10 +1807,12 @@ lpfc_enable_la(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; - control = readl(phba->HCregaddr); - control |= HC_LAINT_ENA; - writel(control, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ + if (phba->sli_rev <= LPFC_SLI_REV3) { + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } spin_unlock_irq(&phba->hbalock); } @@ -1159,6 +1821,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { lpfc_linkdown(phba); lpfc_enable_la(phba); + lpfc_unregister_unused_fcf(phba); /* turn on Link Attention interrupts - no CLEAR_LA needed */ } @@ -1175,7 +1838,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); READ_LA_VAR *la; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); /* Unblock ELS traffic */ @@ -1190,7 +1853,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto lpfc_mbx_cmpl_read_la_free_mbuf; } - la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; + la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; memcpy(&phba->alpa_map[0], mp->virt, 128); @@ -1328,7 +1991,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) static void lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -1381,7 +2044,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; switch (mb->mbxStatus) { case 0x0011: @@ -1416,6 +2079,128 @@ out: return; } +/** + * lpfc_create_static_vport - Read HBA config region to create static vports. + * @phba: pointer to lpfc hba data structure. + * + * This routine issues a DUMP mailbox command for config region 22 to get + * the list of static vports to be created.
The function creates vports + * based on the information returned from the HBA. + **/ +void +lpfc_create_static_vport(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmb = NULL; + MAILBOX_t *mb; + struct static_vport_info *vport_info; + int rc, i; + struct fc_vport_identifiers vport_id; + struct fc_vport *new_fc_vport; + struct Scsi_Host *shost; + struct lpfc_vport *vport; + uint16_t offset = 0; + uint8_t *vport_buff; + + pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0542 lpfc_create_static_vport failed to" + " allocate mailbox memory\n"); + return; + } + + mb = &pmb->u.mb; + + vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); + if (!vport_info) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0543 lpfc_create_static_vport failed to" + " allocate vport_info\n"); + mempool_free(pmb, phba->mbox_mem_pool); + return; + } + + vport_buff = (uint8_t *) vport_info; + do { + lpfc_dump_static_vport(phba, pmb, offset); + pmb->vport = phba->pport; + rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); + + if ((rc != MBX_SUCCESS) || mb->mbxStatus) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0544 lpfc_create_static_vport failed to" + " issue dump mailbox command ret 0x%x " + "status 0x%x\n", + rc, mb->mbxStatus); + goto out; + } + + if (mb->un.varDmp.word_cnt > + sizeof(struct static_vport_info) - offset) + mb->un.varDmp.word_cnt = + sizeof(struct static_vport_info) - offset; + + lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, + vport_buff + offset, + mb->un.varDmp.word_cnt); + offset += mb->un.varDmp.word_cnt; + + } while (mb->un.varDmp.word_cnt && + offset < sizeof(struct static_vport_info)); + + + if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || + ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) + != VPORT_INFO_REV)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0545 lpfc_create_static_vport bad" + " information header 0x%x 0x%x\n", + le32_to_cpu(vport_info->signature), + le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); + + goto out; + } + + shost = lpfc_shost_from_vport(phba->pport); + + for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { + memset(&vport_id, 0, sizeof(vport_id)); + vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); + vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); + if (!vport_id.port_name || !vport_id.node_name) + continue; + + vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; + vport_id.vport_type = FC_PORTTYPE_NPIV; + vport_id.disable = false; + new_fc_vport = fc_vport_create(shost, 0, &vport_id); + + if (!new_fc_vport) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0546 lpfc_create_static_vport failed to" + " create vport\n"); + continue; + } + + vport = *(struct lpfc_vport **)new_fc_vport->dd_data; + vport->vport_flag |= STATIC_VPORT; + } + +out: + /* + * If this is a timed-out command, setting context2 to NULL tells the + * SLI layer not to use this buffer. + */ + spin_lock_irq(&phba->hbalock); + pmb->context2 = NULL; + spin_unlock_irq(&phba->hbalock); + kfree(vport_info); + if (rc != MBX_TIMEOUT) + mempool_free(pmb, phba->mbox_mem_pool); + + return; +} + /* * This routine handles processing a Fabric REG_LOGIN mailbox * command upon completion.
It is setup in the LPFC_MBOXQ @@ -1426,16 +2211,17 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp; - struct lpfc_vport **vports; - int i; ndlp = (struct lpfc_nodelist *) pmb->context2; pmb->context1 = NULL; pmb->context2 = NULL; if (mb->mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + "0258 Register Fabric login error: 0x%x\n", + mb->mbxStatus); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); @@ -1454,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "0258 Register Fabric login error: 0x%x\n", - mb->mbxStatus); /* Decrement the reference count to ndlp after the reference * to the ndlp are done. */ @@ -1465,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); if (vport->port_state == LPFC_FABRIC_CFG_LINK) { - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) - for(i = 0; - i <= phba->max_vpi && vports[i] != NULL; - i++) { - if (vports[i]->port_type == LPFC_PHYSICAL_PORT) - continue; - if (phba->fc_topology == TOPOLOGY_LOOP) { - lpfc_vport_set_state(vports[i], - FC_VPORT_LINKDOWN); - continue; - } - if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) - lpfc_initial_fdisc(vports[i]); - else { - lpfc_vport_set_state(vports[i], - FC_VPORT_NO_FABRIC_SUPP); - lpfc_printf_vlog(vport, KERN_ERR, - LOG_ELS, - "0259 No NPIV " - "Fabric support\n"); - } - } - lpfc_destroy_vport_work_array(phba, vports); + lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } @@ -1516,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; struct lpfc_vport *vport = pmb->vport; if (mb->mbxStatus) { out: + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0260 Register NameServer error: 0x%x\n", + mb->mbxStatus); /* decrement the node reference count held for this * callback function. 
*/ @@ -1546,15 +2310,13 @@ out: return; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0260 Register NameServer error: 0x%x\n", - mb->mbxStatus); return; } pmb->context1 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -2055,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, if (pring->ringno == LPFC_ELS_RING) { switch (icmd->ulpCommand) { case CMD_GEN_REQUEST64_CR: - if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) + if (iocb->context_un.ndlp == ndlp) return 1; case CMD_ELS_REQUEST64_CR: if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) @@ -2102,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) */ psli = &phba->sli; rpi = ndlp->nlp_rpi; - if (rpi) { + if (ndlp->nlp_flag & NLP_RPI_VALID) { /* Now process each ring */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; @@ -2150,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LPFC_MBOXQ_t *mbox; int rc; - if (ndlp->nlp_rpi) { + if (ndlp->nlp_flag & NLP_RPI_VALID) { mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); @@ -2162,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } lpfc_no_rpi(phba, ndlp); ndlp->nlp_rpi = 0; + ndlp->nlp_flag &= ~NLP_RPI_VALID; return 1; } return 0; @@ -2252,7 +3015,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { - if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mb->context2 = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -2261,7 +3024,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { - if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { @@ -2309,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc; lpfc_cancel_retry_delay_tmo(vport, ndlp); - if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { + if ((ndlp->nlp_flag & NLP_DEFER_RM) && + !(ndlp->nlp_flag & NLP_RPI_VALID)) { /* For this case we need to cleanup the default rpi * allocated by the firmware. */ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { - rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, + rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, (uint8_t *) &vport->fc_sparam, mbox, 0); if (rc) { mempool_free(mbox, phba->mbox_mem_pool); @@ -2553,7 +3317,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) * clear_la then don't send it. 
*/ if ((phba->link_state >= LPFC_CLEAR_LA) || - (vport->port_type != LPFC_PHYSICAL_PORT)) + (vport->port_type != LPFC_PHYSICAL_PORT) || + (phba->sli_rev == LPFC_SLI_REV4)) return; /* Link up discovery */ @@ -2582,7 +3347,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (regvpimbox) { - lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); + lpfc_reg_vpi(vport, regvpimbox); regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; regvpimbox->vport = vport; if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) @@ -2642,7 +3407,8 @@ lpfc_disc_start(struct lpfc_vport *vport) */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && - !(vport->fc_flag & FC_RSCN_MODE)) { + !(vport->fc_flag & FC_RSCN_MODE) && + (phba->sli_rev < LPFC_SLI_REV4)) { lpfc_issue_reg_vpi(phba, vport); return; } @@ -2919,11 +3685,13 @@ restart_disc: * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. */ - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) - lpfc_issue_reg_vpi(phba, vport); - else { /* NPIV Not enabled */ - lpfc_issue_clear_la(phba, vport); - vport->port_state = LPFC_VPORT_READY; + if (phba->sli_rev < LPFC_SLI_REV4) { + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; + } } /* Setup and issue mailbox INITIALIZE LINK command */ @@ -2939,7 +3707,7 @@ restart_disc: lpfc_linkdown(phba); lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, phba->cfg_link_speed); - initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; + initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; initlinkmbox->vport = vport; initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); @@ -2959,11 +3727,13 @@ restart_disc: * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. */ - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) - lpfc_issue_reg_vpi(phba, vport); - else { /* NPIV Not enabled */ - lpfc_issue_clear_la(phba, vport); - vport->port_state = LPFC_VPORT_READY; + if (phba->sli_rev < LPFC_SLI_REV4) { + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; + } } break; @@ -3036,7 +3806,7 @@ restart_disc: void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; struct lpfc_vport *vport = pmb->vport; @@ -3044,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->context1 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -3297,3 +4068,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) return 1; return 0; } + +/** + * lpfc_fcf_inuse - Check if FCF can be unregistered. + * @phba: Pointer to hba context object. + * + * This function iterates through all FC nodes associated + * with all vports to check if there is any node with + * fc_rports associated with it.
If there is an fc_rport + * associated with the node, then the node is either in + * discovered state or its devloss_timer is pending. + */ +static int +lpfc_fcf_inuse(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i, ret = 0; + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + + vports = lpfc_create_vport_work_array(phba); + + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + shost = lpfc_shost_from_vport(vports[i]); + spin_lock_irq(shost->host_lock); + list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { + if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport && + (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { + ret = 1; + spin_unlock_irq(shost->host_lock); + goto out; + } + } + spin_unlock_irq(shost->host_lock); + } +out: + lpfc_destroy_vport_work_array(phba, vports); + return ret; +} + +/** + * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. + * @phba: Pointer to hba context object. + * @mboxq: Pointer to mailbox object. + * + * This function frees memory associated with the mailbox command. + */ +static void +lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2555 UNREG_VFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + } + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. + * @phba: Pointer to hba context object. + * @mboxq: Pointer to mailbox object. + * + * This function frees memory associated with the mailbox command. + */ +static void +lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_vport *vport = mboxq->vport; + + if (mboxq->u.mb.mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2550 UNREG_FCFI mbxStatus error x%x " + "HBA state x%x\n", + mboxq->u.mb.mbxStatus, vport->port_state); + } + mempool_free(mboxq, phba->mbox_mem_pool); + return; +} + +/** + * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. + * @phba: Pointer to hba context object. + * + * This function checks if there are any connected remote ports for the FCF + * and, if all the devices are disconnected, unregisters the FCFI. + * It also tries to use another FCF for discovery. + */ +void +lpfc_unregister_unused_fcf(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mbox; + int rc; + struct lpfc_vport **vports; + int i; + + spin_lock_irq(&phba->hbalock); + /* + * If the HBA is not running in FIP mode, or the HBA does not + * support FCoE, or the FCF is not registered, do nothing.
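+ *
+ * Otherwise the teardown below proceeds in order: unregister the
+ * VPIs, then the VFI, then the FCFI, and finally rescan the FCF
+ * table for another usable record unless the driver is unloading
+ * or the link is down.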
+ */ + if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || + !(phba->fcf.fcf_flag & FCF_REGISTERED) || + (phba->cfg_enable_fip == 0)) { + spin_unlock_irq(&phba->hbalock); + return; + } + spin_unlock_irq(&phba->hbalock); + + if (lpfc_fcf_inuse(phba)) + return; + + + /* Unregister VPIs */ + vports = lpfc_create_vport_work_array(phba); + if (vports && + (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + lpfc_mbx_unreg_vpi(vports[i]); + vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; + vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; + } + lpfc_destroy_vport_work_array(phba, vports); + + /* Unregister VFI */ + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2556 UNREG_VFI mbox allocation failed, " + "HBA state x%x\n", + phba->pport->port_state); + return; + } + + lpfc_unreg_vfi(mbox, phba->pport->vfi); + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; + + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2557 UNREG_VFI issue mbox failed rc x%x " + "HBA state x%x\n", + rc, phba->pport->port_state); + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + + /* Unregister FCF */ + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2551 UNREG_FCFI mbox allocation failed, " + "HBA state x%x\n", + phba->pport->port_state); + return; + } + + lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2552 UNREG_FCFI issue mbox failed rc x%x " + "HBA state x%x\n", + rc, phba->pport->port_state); + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | + FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE | + FCF_VALID_VLAN); + spin_unlock_irq(&phba->hbalock); + + /* + * If the driver is not unloading, check if there is any other + * FCF record that can be used for discovery. + */ + if ((phba->pport->load_flag & FC_UNLOADING) || + (phba->link_state < LPFC_LINK_UP)) + return; + + rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + + if (rc) + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, + "2553 lpfc_unregister_unused_fcf failed to read FCF" + " record HBA state x%x\n", + phba->pport->port_state); +} + +/** + * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. + * @phba: Pointer to hba context object. + * @buff: Buffer containing the FCF connection table as in the config + * region. + * This function creates the driver data structures for the FCF connection + * record table read from config region 23.
+ */ +static void +lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, + uint8_t *buff) +{ + struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + struct lpfc_fcf_conn_hdr *conn_hdr; + struct lpfc_fcf_conn_rec *conn_rec; + uint32_t record_count; + int i; + + /* Free the current connect table */ + list_for_each_entry_safe(conn_entry, next_conn_entry, + &phba->fcf_conn_rec_list, list) + kfree(conn_entry); + + conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; + record_count = conn_hdr->length * sizeof(uint32_t)/ + sizeof(struct lpfc_fcf_conn_rec); + + conn_rec = (struct lpfc_fcf_conn_rec *) + (buff + sizeof(struct lpfc_fcf_conn_hdr)); + + for (i = 0; i < record_count; i++) { + if (!(conn_rec[i].flags & FCFCNCT_VALID)) + continue; + conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), + GFP_KERNEL); + if (!conn_entry) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2566 Failed to allocate connection" + " table entry\n"); + return; + } + + memcpy(&conn_entry->conn_rec, &conn_rec[i], + sizeof(struct lpfc_fcf_conn_rec)); + conn_entry->conn_rec.vlan_tag = + le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF; + conn_entry->conn_rec.flags = + le16_to_cpu(conn_entry->conn_rec.flags); + list_add_tail(&conn_entry->list, + &phba->fcf_conn_rec_list); + } +} + +/** + * lpfc_read_fcoe_param - Read FCoE parameters from config region 23. + * @phba: Pointer to hba context object. + * @buff: Buffer containing the FCoE parameter data structure. + * + * This function updates the driver data structure with config + * parameters read from config region 23. + */ +static void +lpfc_read_fcoe_param(struct lpfc_hba *phba, + uint8_t *buff) +{ + struct lpfc_fip_param_hdr *fcoe_param_hdr; + struct lpfc_fcoe_params *fcoe_param; + + fcoe_param_hdr = (struct lpfc_fip_param_hdr *) + buff; + fcoe_param = (struct lpfc_fcoe_params *) + (buff + sizeof(struct lpfc_fip_param_hdr)); + + if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || + (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) + return; + + if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == + FIPP_MODE_ON) + phba->cfg_enable_fip = 1; + + if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == + FIPP_MODE_OFF) + phba->cfg_enable_fip = 0; + + if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { + phba->valid_vlan = 1; + phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & + 0xFFF; + } + + phba->fc_map[0] = fcoe_param->fc_map[0]; + phba->fc_map[1] = fcoe_param->fc_map[1]; + phba->fc_map[2] = fcoe_param->fc_map[2]; + return; +} + +/** + * lpfc_get_rec_conf23 - Get a record type in config region data. + * @buff: Buffer containing config region 23 data. + * @size: Size of the data buffer. + * @rec_type: Record type to be searched. + * + * This function searches the config region data to find the beginning + * of the record specified by @rec_type. If the record is found, this + * function returns a pointer to the record; otherwise it returns NULL. + */ +static uint8_t * +lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) +{ + uint32_t offset = 0, rec_length; + + if ((buff[0] == LPFC_REGION23_LAST_REC) || + (size < sizeof(uint32_t))) + return NULL; + + rec_length = buff[offset + 1]; + + /* + * One TLV record has a one-word header plus the number of data words + * specified in the rec_length field of the record header.
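+ *
+ * For example, a record with rec_length = 2 spans three words: one
+ * header word (buff[offset] holds the record type, buff[offset + 1]
+ * the length in data words) followed by two data words, so the scan
+ * advances offset by rec_length * 4 + 4 bytes per record.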
+ */ + while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) + <= size) { + if (buff[offset] == rec_type) + return &buff[offset]; + + if (buff[offset] == LPFC_REGION23_LAST_REC) + return NULL; + + offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); + rec_length = buff[offset + 1]; + } + return NULL; +} + +/** + * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. + * @phba: Pointer to lpfc_hba data structure. + * @buff: Buffer containing config region 23 data. + * @size: Size of the data buffer. + * + * This function parses the FCoE config parameters in config region 23 and + * populates the driver data structures with the parameters. + */ +void +lpfc_parse_fcoe_conf(struct lpfc_hba *phba, + uint8_t *buff, + uint32_t size) +{ + uint32_t offset = 0, rec_length; + uint8_t *rec_ptr; + + /* + * If the data size is less than 2 words, the signature and version + * cannot be verified. + */ + if (size < 2*sizeof(uint32_t)) + return; + + /* Check the region signature first */ + if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2567 Config region 23 has bad signature\n"); + return; + } + + offset += 4; + + /* Check the data structure version */ + if (buff[offset] != LPFC_REGION23_VERSION) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2568 Config region 23 has bad version\n"); + return; + } + offset += 4; + + rec_length = buff[offset + 1]; + + /* Read FCoE param record */ + rec_ptr = lpfc_get_rec_conf23(&buff[offset], + size - offset, FCOE_PARAM_TYPE); + if (rec_ptr) + lpfc_read_fcoe_param(phba, rec_ptr); + + /* Read FCF connection table */ + rec_ptr = lpfc_get_rec_conf23(&buff[offset], + size - offset, FCOE_CONN_TBL_TYPE); + if (rec_ptr) + lpfc_read_fcf_conn_tbl(phba, rec_ptr); + +} diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 4168c7b498b8..02aa016b93e9 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex.
* * www.emulex.com * * * @@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */ }; /* + * Virtual Fabric Tagging Header + */ +struct fc_vft_header { + uint32_t word0; +#define fc_vft_hdr_r_ctl_SHIFT 24 +#define fc_vft_hdr_r_ctl_MASK 0xFF +#define fc_vft_hdr_r_ctl_WORD word0 +#define fc_vft_hdr_ver_SHIFT 22 +#define fc_vft_hdr_ver_MASK 0x3 +#define fc_vft_hdr_ver_WORD word0 +#define fc_vft_hdr_type_SHIFT 18 +#define fc_vft_hdr_type_MASK 0xF +#define fc_vft_hdr_type_WORD word0 +#define fc_vft_hdr_e_SHIFT 16 +#define fc_vft_hdr_e_MASK 0x1 +#define fc_vft_hdr_e_WORD word0 +#define fc_vft_hdr_priority_SHIFT 13 +#define fc_vft_hdr_priority_MASK 0x7 +#define fc_vft_hdr_priority_WORD word0 +#define fc_vft_hdr_vf_id_SHIFT 1 +#define fc_vft_hdr_vf_id_MASK 0xFFF +#define fc_vft_hdr_vf_id_WORD word0 + uint32_t word1; +#define fc_vft_hdr_hopct_SHIFT 24 +#define fc_vft_hdr_hopct_MASK 0xFF +#define fc_vft_hdr_hopct_WORD word1 +}; + +/* * Extended Link Service LS_COMMAND codes (Payload Word 0) */ #ifdef __BIG_ENDIAN_BITFIELD @@ -1152,6 +1181,9 @@ typedef struct { #define PCI_DEVICE_ID_HORNET 0xfe05 #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 +#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 +#define PCI_DEVICE_ID_TIGERSHARK 0x0704 +#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705 #define JEDEC_ID_ADDRESS 0x0080001c #define FIREFLY_JEDEC_ID 0x1ACC @@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */ #define MBX_READ_LA64 0x95 #define MBX_REG_VPI 0x96 #define MBX_UNREG_VPI 0x97 -#define MBX_REG_VNPID 0x96 -#define MBX_UNREG_VNPID 0x97 #define MBX_WRITE_WWN 0x98 #define MBX_SET_DEBUG 0x99 #define MBX_LOAD_EXP_ROM 0x9C - -#define MBX_MAX_CMDS 0x9D +#define MBX_SLI4_CONFIG 0x9B +#define MBX_SLI4_REQ_FTRS 0x9D +#define MBX_MAX_CMDS 0x9E +#define MBX_RESUME_RPI 0x9E #define MBX_SLI2_CMD_MASK 0x80 +#define MBX_REG_VFI 0x9F +#define MBX_REG_FCFI 0xA0 +#define MBX_UNREG_VFI 0xA1 +#define MBX_UNREG_FCFI 0xA2 +#define MBX_INIT_VFI 0xA3 +#define MBX_INIT_VPI 0xA4 /* IOCB Commands */ @@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */ #define CMD_IOCB_LOGENTRY_CN 0x94 #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 +/* Unhandled Data Security SLI Commands */ +#define DSSCMD_IWRITE64_CR 0xD8 +#define DSSCMD_IWRITE64_CX 0xD9 +#define DSSCMD_IREAD64_CR 0xDA +#define DSSCMD_IREAD64_CX 0xDB +#define DSSCMD_INVALIDATE_DEK 0xDC +#define DSSCMD_SET_KEK 0xDD +#define DSSCMD_GET_KEK_ID 0xDE +#define DSSCMD_GEN_XFER 0xDF + #define CMD_MAX_IOCB_CMD 0xE6 #define CMD_IOCB_MASK 0xff @@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */ #define MBXERR_BAD_RCV_LENGTH 14 #define MBXERR_DMA_ERROR 15 #define MBXERR_ERROR 16 +#define MBXERR_LINK_DOWN 0x33 #define MBX_NOT_FINISHED 255 #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ @@ -1504,32 +1553,6 @@ struct ulp_bde { #endif }; -struct ulp_bde64 { /* SLI-2 */ - union ULP_BDE_TUS { - uint32_t w; - struct { -#ifdef __BIG_ENDIAN_BITFIELD - uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED - VALUE !! */ - uint32_t bdeSize:24; /* Size of buffer (in bytes) */ -#else /* __LITTLE_ENDIAN_BITFIELD */ - uint32_t bdeSize:24; /* Size of buffer (in bytes) */ - uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED - VALUE !! 
*/ -#endif -#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ -#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ -#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ -#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ -#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ -#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ -#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ - } f; - } tus; - uint32_t addrLow; - uint32_t addrHigh; -}; - typedef struct ULP_BDL { /* SLI-2 */ #ifdef __BIG_ENDIAN_BITFIELD uint32_t bdeFlags:8; /* BDL Flags */ @@ -2287,7 +2310,7 @@ typedef struct { uint32_t rsvd3; uint32_t rsvd4; uint32_t rsvd5; - uint16_t rsvd6; + uint16_t vfi; uint16_t vpi; #else /* __LITTLE_ENDIAN */ uint32_t rsvd1; @@ -2297,7 +2320,7 @@ typedef struct { uint32_t rsvd4; uint32_t rsvd5; uint16_t vpi; - uint16_t rsvd6; + uint16_t vfi; #endif } REG_VPI_VAR; @@ -2457,7 +2480,7 @@ typedef struct { uint32_t entry_index:16; #endif - uint32_t rsvd1; + uint32_t sli4_length; uint32_t word_cnt; uint32_t resp_offset; } DUMP_VAR; @@ -2470,9 +2493,32 @@ typedef struct { #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ +#define DMP_REGION_VPORT 0x16 /* VPort info region */ +#define DMP_VPORT_REGION_SIZE 0x200 +#define DMP_MBOX_OFFSET_WORD 0x5 + +#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */ +#define DMP_FCOEPARAM_RGN_SIZE 0x400 + #define WAKE_UP_PARMS_REGION_ID 4 #define WAKE_UP_PARMS_WORD_SIZE 15 +struct vport_rec { + uint8_t wwpn[8]; + uint8_t wwnn[8]; +}; + +#define VPORT_INFO_SIG 0x32324752 +#define VPORT_INFO_REV_MASK 0xff +#define VPORT_INFO_REV 0x1 +#define MAX_STATIC_VPORT_COUNT 16 +struct static_vport_info { + uint32_t signature; + uint32_t rev; + struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; + uint32_t resvd[66]; +}; + /* Option rom version structure */ struct prog_id { #ifdef __BIG_ENDIAN_BITFIELD @@ -2697,7 +2743,9 @@ typedef struct { #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd1 : 23; /* Reserved */ + uint32_t rsvd1 : 19; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd2 : 3; /* Reserved */ uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ @@ -2717,10 +2765,14 @@ typedef struct { uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t cbg : 1; /* Configure BlockGuard */ - uint32_t rsvd1 : 23; /* Reserved */ + uint32_t rsvd2 : 3; /* Reserved */ + uint32_t cdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd1 : 19; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd2 : 23; /* Reserved */ + uint32_t rsvd3 : 19; /* Reserved */ + uint32_t gdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd4 : 3; /* Reserved */ uint32_t gbg : 1; /* Grant BlockGuard */ uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gcrp : 1; /* Grant Command Ring Polling */ @@ -2740,7 +2792,9 @@ typedef struct { uint32_t gcrp : 1; /* Grant Command Ring Polling */ uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gbg : 1; /* Grant BlockGuard */ - uint32_t rsvd2 : 23; /* Reserved */ + uint32_t rsvd4 : 3; /* Reserved */ + uint32_t gdss : 1; /* Configure Data Security SLI */ + uint32_t rsvd3 : 19; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD @@ -2753,20 +2807,20 @@ typedef struct { #ifdef __BIG_ENDIAN_BITFIELD uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ - 
uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ #else /* __LITTLE_ENDIAN */ - uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ #endif - uint32_t rsvd4; /* Reserved */ + uint32_t rsvd6; /* Reserved */ #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd5 : 16; /* Reserved */ + uint32_t rsvd7 : 16; /* Reserved */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ #else /* __LITTLE_ENDIAN */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ - uint32_t rsvd5 : 16; /* Reserved */ + uint32_t rsvd7 : 16; /* Reserved */ #endif } CONFIG_PORT_VAR; @@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp) #define MENLO_TIMEOUT 30 #define SETVAR_MLOMNT 0x103107 #define SETVAR_MLORST 0x103007 + +#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h new file mode 100644 index 000000000000..39c34b3ad29d --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -0,0 +1,2141 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2009 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. * + *******************************************************************/ + +/* Macros to deal with bit fields. Each bit field must have 3 #defines + * associated with it (_SHIFT, _MASK, and _WORD). + * EG. For a bit field that is in the 7th bit of the "field4" field of a + * structure and is 2 bits in size the following #defines must exist: + * struct temp { + * uint32_t field1; + * uint32_t field2; + * uint32_t field3; + * uint32_t field4; + * #define example_bit_field_SHIFT 7 + * #define example_bit_field_MASK 0x03 + * #define example_bit_field_WORD field4 + * uint32_t field5; + * }; + * Then the macros below may be used to get or set the value of that field. + * EG. 
To get the value of the bit field from the above example: + * struct temp t1; + * value = bf_get(example_bit_field, &t1); + * And then to set that bit field: + * bf_set(example_bit_field, &t1, 2); + * Or clear that bit field: + * bf_set(example_bit_field, &t1, 0); + */ +#define bf_get(name, ptr) \ + (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) +#define bf_set(name, ptr, value) \ + ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ + ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) + +struct dma_address { + uint32_t addr_lo; + uint32_t addr_hi; +}; + +#define LPFC_SLI4_BAR0 1 +#define LPFC_SLI4_BAR1 2 +#define LPFC_SLI4_BAR2 4 + +#define LPFC_SLI4_MBX_EMBED true +#define LPFC_SLI4_MBX_NEMBED false + +#define LPFC_SLI4_MB_WORD_COUNT 64 +#define LPFC_MAX_MQ_PAGE 8 +#define LPFC_MAX_WQ_PAGE 8 +#define LPFC_MAX_CQ_PAGE 4 +#define LPFC_MAX_EQ_PAGE 8 + +#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */ +#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */ +#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */ + +/* Define SLI4 Alignment requirements. */ +#define LPFC_ALIGN_16_BYTE 16 +#define LPFC_ALIGN_64_BYTE 64 + +/* Define SLI4 specific definitions. */ +#define LPFC_MQ_CQE_BYTE_OFFSET 256 +#define LPFC_MBX_CMD_HDR_LENGTH 16 +#define LPFC_MBX_ERROR_RANGE 0x4000 +#define LPFC_BMBX_BIT1_ADDR_HI 0x2 +#define LPFC_BMBX_BIT1_ADDR_LO 0 +#define LPFC_RPI_HDR_COUNT 64 +#define LPFC_HDR_TEMPLATE_SIZE 4096 +#define LPFC_RPI_ALLOC_ERROR 0xFFFF +#define LPFC_FCF_RECORD_WD_CNT 132 +#define LPFC_ENTIRE_FCF_DATABASE 0 +#define LPFC_DFLT_FCF_INDEX 0 + +/* Virtual function numbers */ +#define LPFC_VF0 0 +#define LPFC_VF1 1 +#define LPFC_VF2 2 +#define LPFC_VF3 3 +#define LPFC_VF4 4 +#define LPFC_VF5 5 +#define LPFC_VF6 6 +#define LPFC_VF7 7 +#define LPFC_VF8 8 +#define LPFC_VF9 9 +#define LPFC_VF10 10 +#define LPFC_VF11 11 +#define LPFC_VF12 12 +#define LPFC_VF13 13 +#define LPFC_VF14 14 +#define LPFC_VF15 15 +#define LPFC_VF16 16 +#define LPFC_VF17 17 +#define LPFC_VF18 18 +#define LPFC_VF19 19 +#define LPFC_VF20 20 +#define LPFC_VF21 21 +#define LPFC_VF22 22 +#define LPFC_VF23 23 +#define LPFC_VF24 24 +#define LPFC_VF25 25 +#define LPFC_VF26 26 +#define LPFC_VF27 27 +#define LPFC_VF28 28 +#define LPFC_VF29 29 +#define LPFC_VF30 30 +#define LPFC_VF31 31 + +/* PCI function numbers */ +#define LPFC_PCI_FUNC0 0 +#define LPFC_PCI_FUNC1 1 +#define LPFC_PCI_FUNC2 2 +#define LPFC_PCI_FUNC3 3 +#define LPFC_PCI_FUNC4 4 + +/* Active interrupt test count */ +#define LPFC_ACT_INTR_CNT 4 + +/* Delay Multiplier constant */ +#define LPFC_DMULT_CONST 651042 +#define LPFC_MIM_IMAX 636 +#define LPFC_FP_DEF_IMAX 10000 +#define LPFC_SP_DEF_IMAX 10000 + +struct ulp_bde64 { + union ULP_BDE_TUS { + uint32_t w; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! */ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! 
*/ +#endif +#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ +#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ +#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ +#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ +#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ +#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ +#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ + } f; + } tus; + uint32_t addrLow; + uint32_t addrHigh; +}; + +struct lpfc_sli4_flags { + uint32_t word0; +#define lpfc_fip_flag_SHIFT 0 +#define lpfc_fip_flag_MASK 0x00000001 +#define lpfc_fip_flag_WORD word0 +}; + +/* event queue entry structure */ +struct lpfc_eqe { + uint32_t word0; +#define lpfc_eqe_resource_id_SHIFT 16 +#define lpfc_eqe_resource_id_MASK 0x000000FF +#define lpfc_eqe_resource_id_WORD word0 +#define lpfc_eqe_minor_code_SHIFT 4 +#define lpfc_eqe_minor_code_MASK 0x00000FFF +#define lpfc_eqe_minor_code_WORD word0 +#define lpfc_eqe_major_code_SHIFT 1 +#define lpfc_eqe_major_code_MASK 0x00000007 +#define lpfc_eqe_major_code_WORD word0 +#define lpfc_eqe_valid_SHIFT 0 +#define lpfc_eqe_valid_MASK 0x00000001 +#define lpfc_eqe_valid_WORD word0 +}; + +/* completion queue entry structure (common fields for all cqe types) */ +struct lpfc_cqe { + uint32_t reserved0; + uint32_t reserved1; + uint32_t reserved2; + uint32_t word3; +#define lpfc_cqe_valid_SHIFT 31 +#define lpfc_cqe_valid_MASK 0x00000001 +#define lpfc_cqe_valid_WORD word3 +#define lpfc_cqe_code_SHIFT 16 +#define lpfc_cqe_code_MASK 0x000000FF +#define lpfc_cqe_code_WORD word3 +}; + +/* Completion Queue Entry Status Codes */ +#define CQE_STATUS_SUCCESS 0x0 +#define CQE_STATUS_FCP_RSP_FAILURE 0x1 +#define CQE_STATUS_REMOTE_STOP 0x2 +#define CQE_STATUS_LOCAL_REJECT 0x3 +#define CQE_STATUS_NPORT_RJT 0x4 +#define CQE_STATUS_FABRIC_RJT 0x5 +#define CQE_STATUS_NPORT_BSY 0x6 +#define CQE_STATUS_FABRIC_BSY 0x7 +#define CQE_STATUS_INTERMED_RSP 0x8 +#define CQE_STATUS_LS_RJT 0x9 +#define CQE_STATUS_CMD_REJECT 0xb +#define CQE_STATUS_FCP_TGT_LENCHECK 0xc +#define CQE_STATUS_NEED_BUFF_ENTRY 0xf + +/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). 
*/ +#define CQE_HW_STATUS_NO_ERR 0x0 +#define CQE_HW_STATUS_UNDERRUN 0x1 +#define CQE_HW_STATUS_OVERRUN 0x2 + +/* Completion Queue Entry Codes */ +#define CQE_CODE_COMPL_WQE 0x1 +#define CQE_CODE_RELEASE_WQE 0x2 +#define CQE_CODE_RECEIVE 0x4 +#define CQE_CODE_XRI_ABORTED 0x5 + +/* completion queue entry for wqe completions */ +struct lpfc_wcqe_complete { + uint32_t word0; +#define lpfc_wcqe_c_request_tag_SHIFT 16 +#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF +#define lpfc_wcqe_c_request_tag_WORD word0 +#define lpfc_wcqe_c_status_SHIFT 8 +#define lpfc_wcqe_c_status_MASK 0x000000FF +#define lpfc_wcqe_c_status_WORD word0 +#define lpfc_wcqe_c_hw_status_SHIFT 0 +#define lpfc_wcqe_c_hw_status_MASK 0x000000FF +#define lpfc_wcqe_c_hw_status_WORD word0 + uint32_t total_data_placed; + uint32_t parameter; + uint32_t word3; +#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_c_xb_SHIFT 28 +#define lpfc_wcqe_c_xb_MASK 0x00000001 +#define lpfc_wcqe_c_xb_WORD word3 +#define lpfc_wcqe_c_pv_SHIFT 27 +#define lpfc_wcqe_c_pv_MASK 0x00000001 +#define lpfc_wcqe_c_pv_WORD word3 +#define lpfc_wcqe_c_priority_SHIFT 24 +#define lpfc_wcqe_c_priority_MASK 0x00000007 +#define lpfc_wcqe_c_priority_WORD word3 +#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD +}; + +/* completion queue entry for wqe release */ +struct lpfc_wcqe_release { + uint32_t reserved0; + uint32_t reserved1; + uint32_t word2; +#define lpfc_wcqe_r_wq_id_SHIFT 16 +#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF +#define lpfc_wcqe_r_wq_id_WORD word2 +#define lpfc_wcqe_r_wqe_index_SHIFT 0 +#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF +#define lpfc_wcqe_r_wqe_index_WORD word2 + uint32_t word3; +#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD +}; + +struct sli4_wcqe_xri_aborted { + uint32_t word0; +#define lpfc_wcqe_xa_status_SHIFT 8 +#define lpfc_wcqe_xa_status_MASK 0x000000FF +#define lpfc_wcqe_xa_status_WORD word0 + uint32_t parameter; + uint32_t word2; +#define lpfc_wcqe_xa_remote_xid_SHIFT 16 +#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF +#define lpfc_wcqe_xa_remote_xid_WORD word2 +#define lpfc_wcqe_xa_xri_SHIFT 0 +#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF +#define lpfc_wcqe_xa_xri_WORD word2 + uint32_t word3; +#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_wcqe_xa_ia_SHIFT 30 +#define lpfc_wcqe_xa_ia_MASK 0x00000001 +#define lpfc_wcqe_xa_ia_WORD word3 +#define CQE_XRI_ABORTED_IA_REMOTE 0 +#define CQE_XRI_ABORTED_IA_LOCAL 1 +#define lpfc_wcqe_xa_br_SHIFT 29 +#define lpfc_wcqe_xa_br_MASK 0x00000001 +#define lpfc_wcqe_xa_br_WORD word3 +#define CQE_XRI_ABORTED_BR_BA_ACC 0 +#define CQE_XRI_ABORTED_BR_BA_RJT 1 +#define lpfc_wcqe_xa_eo_SHIFT 28 +#define lpfc_wcqe_xa_eo_MASK 0x00000001 +#define lpfc_wcqe_xa_eo_WORD word3 +#define CQE_XRI_ABORTED_EO_REMOTE 0 +#define CQE_XRI_ABORTED_EO_LOCAL 1 +#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK +#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD +}; + 
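+
+/*
+ * Illustrative sketch only, not part of the driver API: shows how the
+ * bf_get() accessor defined at the top of this file decodes one of the
+ * CQE bit fields declared above. The helper name is hypothetical.
+ */
+static inline int
+lpfc_example_xri_abort_is_local(struct sli4_wcqe_xri_aborted *axri)
+{
+	/* ia lives in bit 30 of word3; 1 means a locally initiated abort */
+	return bf_get(lpfc_wcqe_xa_ia, axri) == CQE_XRI_ABORTED_IA_LOCAL;
+}
+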
+/* completion queue entry structure for rqe completion */ +struct lpfc_rcqe { + uint32_t word0; +#define lpfc_rcqe_bindex_SHIFT 16 +#define lpfc_rcqe_bindex_MASK 0x0000FFF +#define lpfc_rcqe_bindex_WORD word0 +#define lpfc_rcqe_status_SHIFT 8 +#define lpfc_rcqe_status_MASK 0x000000FF +#define lpfc_rcqe_status_WORD word0 +#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */ +#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ +#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ +#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ + uint32_t reserved1; + uint32_t word2; +#define lpfc_rcqe_length_SHIFT 16 +#define lpfc_rcqe_length_MASK 0x0000FFFF +#define lpfc_rcqe_length_WORD word2 +#define lpfc_rcqe_rq_id_SHIFT 6 +#define lpfc_rcqe_rq_id_MASK 0x000003FF +#define lpfc_rcqe_rq_id_WORD word2 +#define lpfc_rcqe_fcf_id_SHIFT 0 +#define lpfc_rcqe_fcf_id_MASK 0x0000003F +#define lpfc_rcqe_fcf_id_WORD word2 + uint32_t word3; +#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT +#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK +#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD +#define lpfc_rcqe_port_SHIFT 30 +#define lpfc_rcqe_port_MASK 0x00000001 +#define lpfc_rcqe_port_WORD word3 +#define lpfc_rcqe_hdr_length_SHIFT 24 +#define lpfc_rcqe_hdr_length_MASK 0x0000001F +#define lpfc_rcqe_hdr_length_WORD word3 +#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT +#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK +#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD +#define lpfc_rcqe_eof_SHIFT 8 +#define lpfc_rcqe_eof_MASK 0x000000FF +#define lpfc_rcqe_eof_WORD word3 +#define FCOE_EOFn 0x41 +#define FCOE_EOFt 0x42 +#define FCOE_EOFni 0x49 +#define FCOE_EOFa 0x50 +#define lpfc_rcqe_sof_SHIFT 0 +#define lpfc_rcqe_sof_MASK 0x000000FF +#define lpfc_rcqe_sof_WORD word3 +#define FCOE_SOFi2 0x2d +#define FCOE_SOFi3 0x2e +#define FCOE_SOFn2 0x35 +#define FCOE_SOFn3 0x36 +}; + +struct lpfc_wqe_generic{ + struct ulp_bde64 bde; + uint32_t word3; + uint32_t word4; + uint32_t word5; + uint32_t word6; +#define lpfc_wqe_gen_context_SHIFT 16 +#define lpfc_wqe_gen_context_MASK 0x0000FFFF +#define lpfc_wqe_gen_context_WORD word6 +#define lpfc_wqe_gen_xri_SHIFT 0 +#define lpfc_wqe_gen_xri_MASK 0x0000FFFF +#define lpfc_wqe_gen_xri_WORD word6 + uint32_t word7; +#define lpfc_wqe_gen_lnk_SHIFT 23 +#define lpfc_wqe_gen_lnk_MASK 0x00000001 +#define lpfc_wqe_gen_lnk_WORD word7 +#define lpfc_wqe_gen_erp_SHIFT 22 +#define lpfc_wqe_gen_erp_MASK 0x00000001 +#define lpfc_wqe_gen_erp_WORD word7 +#define lpfc_wqe_gen_pu_SHIFT 20 +#define lpfc_wqe_gen_pu_MASK 0x00000003 +#define lpfc_wqe_gen_pu_WORD word7 +#define lpfc_wqe_gen_class_SHIFT 16 +#define lpfc_wqe_gen_class_MASK 0x00000007 +#define lpfc_wqe_gen_class_WORD word7 +#define lpfc_wqe_gen_command_SHIFT 8 +#define lpfc_wqe_gen_command_MASK 0x000000FF +#define lpfc_wqe_gen_command_WORD word7 +#define lpfc_wqe_gen_status_SHIFT 4 +#define lpfc_wqe_gen_status_MASK 0x0000000F +#define lpfc_wqe_gen_status_WORD word7 +#define lpfc_wqe_gen_ct_SHIFT 2 +#define lpfc_wqe_gen_ct_MASK 0x00000007 +#define lpfc_wqe_gen_ct_WORD word7 + uint32_t abort_tag; + uint32_t word9; +#define lpfc_wqe_gen_request_tag_SHIFT 0 +#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF +#define lpfc_wqe_gen_request_tag_WORD word9 + uint32_t word10; +#define lpfc_wqe_gen_ccp_SHIFT 24 +#define lpfc_wqe_gen_ccp_MASK 0x000000FF +#define lpfc_wqe_gen_ccp_WORD word10 +#define lpfc_wqe_gen_ccpe_SHIFT 23 +#define lpfc_wqe_gen_ccpe_MASK 0x00000001 +#define lpfc_wqe_gen_ccpe_WORD word10 
+#define lpfc_wqe_gen_pv_SHIFT 19 +#define lpfc_wqe_gen_pv_MASK 0x00000001 +#define lpfc_wqe_gen_pv_WORD word10 +#define lpfc_wqe_gen_pri_SHIFT 16 +#define lpfc_wqe_gen_pri_MASK 0x00000007 +#define lpfc_wqe_gen_pri_WORD word10 + uint32_t word11; +#define lpfc_wqe_gen_cq_id_SHIFT 16 +#define lpfc_wqe_gen_cq_id_MASK 0x000003FF +#define lpfc_wqe_gen_cq_id_WORD word11 +#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff +#define lpfc_wqe_gen_wqec_SHIFT 7 +#define lpfc_wqe_gen_wqec_MASK 0x00000001 +#define lpfc_wqe_gen_wqec_WORD word11 +#define lpfc_wqe_gen_cmd_type_SHIFT 0 +#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F +#define lpfc_wqe_gen_cmd_type_WORD word11 + uint32_t payload[4]; +}; + +struct lpfc_rqe { + uint32_t address_hi; + uint32_t address_lo; +}; + +/* buffer descriptors */ +struct lpfc_bde4 { + uint32_t addr_hi; + uint32_t addr_lo; + uint32_t word2; +#define lpfc_bde4_last_SHIFT 31 +#define lpfc_bde4_last_MASK 0x00000001 +#define lpfc_bde4_last_WORD word2 +#define lpfc_bde4_sge_offset_SHIFT 0 +#define lpfc_bde4_sge_offset_MASK 0x000003FF +#define lpfc_bde4_sge_offset_WORD word2 + uint32_t word3; +#define lpfc_bde4_length_SHIFT 0 +#define lpfc_bde4_length_MASK 0x000000FF +#define lpfc_bde4_length_WORD word3 +}; + +struct lpfc_register { + uint32_t word0; +}; + +#define LPFC_UERR_STATUS_HI 0x00A4 +#define LPFC_UERR_STATUS_LO 0x00A0 +#define LPFC_ONLINE0 0x00B0 +#define LPFC_ONLINE1 0x00B4 +#define LPFC_SCRATCHPAD 0x0058 + +/* BAR0 Registers */ +#define LPFC_HST_STATE 0x00AC +#define lpfc_hst_state_perr_SHIFT 31 +#define lpfc_hst_state_perr_MASK 0x1 +#define lpfc_hst_state_perr_WORD word0 +#define lpfc_hst_state_sfi_SHIFT 30 +#define lpfc_hst_state_sfi_MASK 0x1 +#define lpfc_hst_state_sfi_WORD word0 +#define lpfc_hst_state_nip_SHIFT 29 +#define lpfc_hst_state_nip_MASK 0x1 +#define lpfc_hst_state_nip_WORD word0 +#define lpfc_hst_state_ipc_SHIFT 28 +#define lpfc_hst_state_ipc_MASK 0x1 +#define lpfc_hst_state_ipc_WORD word0 +#define lpfc_hst_state_xrom_SHIFT 27 +#define lpfc_hst_state_xrom_MASK 0x1 +#define lpfc_hst_state_xrom_WORD word0 +#define lpfc_hst_state_dl_SHIFT 26 +#define lpfc_hst_state_dl_MASK 0x1 +#define lpfc_hst_state_dl_WORD word0 +#define lpfc_hst_state_port_status_SHIFT 0 +#define lpfc_hst_state_port_status_MASK 0xFFFF +#define lpfc_hst_state_port_status_WORD word0 + +#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 +#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 +#define LPFC_POST_STAGE_HOST_RDY 0x0002 +#define LPFC_POST_STAGE_BE_RESET 0x0003 +#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100 +#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101 +#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200 +#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201 +#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300 +#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301 +#define LPFC_POST_STAGE_DDR_TEST_START 0x0400 +#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401 +#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600 +#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601 +#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700 +#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701 +#define LPFC_POST_STAGE_ARMFW_START 0x0800 +#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900 +#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901 +#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00 +#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01 +#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00 +#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01 +#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02 +#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03 +#define 
LPFC_POST_STAGE_PARSE_XML 0x0B04 +#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05 +#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06 +#define LPFC_POST_STAGE_RC_DONE 0x0B07 +#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 +#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 +#define LPFC_POST_STAGE_ARMFW_READY 0xC000 +#define LPFC_POST_STAGE_ARMFW_UE 0xF000 + +#define lpfc_scratchpad_slirev_SHIFT 4 +#define lpfc_scratchpad_slirev_MASK 0xF +#define lpfc_scratchpad_slirev_WORD word0 +#define lpfc_scratchpad_chiptype_SHIFT 8 +#define lpfc_scratchpad_chiptype_MASK 0xFF +#define lpfc_scratchpad_chiptype_WORD word0 +#define lpfc_scratchpad_featurelevel1_SHIFT 16 +#define lpfc_scratchpad_featurelevel1_MASK 0xFF +#define lpfc_scratchpad_featurelevel1_WORD word0 +#define lpfc_scratchpad_featurelevel2_SHIFT 24 +#define lpfc_scratchpad_featurelevel2_MASK 0xFF +#define lpfc_scratchpad_featurelevel2_WORD word0 + +/* BAR1 Registers */ +#define LPFC_IMR_MASK_ALL 0xFFFFFFFF +#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF + +#define LPFC_HST_ISR0 0x0C18 +#define LPFC_HST_ISR1 0x0C1C +#define LPFC_HST_ISR2 0x0C20 +#define LPFC_HST_ISR3 0x0C24 +#define LPFC_HST_ISR4 0x0C28 + +#define LPFC_HST_IMR0 0x0C48 +#define LPFC_HST_IMR1 0x0C4C +#define LPFC_HST_IMR2 0x0C50 +#define LPFC_HST_IMR3 0x0C54 +#define LPFC_HST_IMR4 0x0C58 + +#define LPFC_HST_ISCR0 0x0C78 +#define LPFC_HST_ISCR1 0x0C7C +#define LPFC_HST_ISCR2 0x0C80 +#define LPFC_HST_ISCR3 0x0C84 +#define LPFC_HST_ISCR4 0x0C88 + +#define LPFC_SLI4_INTR0 BIT0 +#define LPFC_SLI4_INTR1 BIT1 +#define LPFC_SLI4_INTR2 BIT2 +#define LPFC_SLI4_INTR3 BIT3 +#define LPFC_SLI4_INTR4 BIT4 +#define LPFC_SLI4_INTR5 BIT5 +#define LPFC_SLI4_INTR6 BIT6 +#define LPFC_SLI4_INTR7 BIT7 +#define LPFC_SLI4_INTR8 BIT8 +#define LPFC_SLI4_INTR9 BIT9 +#define LPFC_SLI4_INTR10 BIT10 +#define LPFC_SLI4_INTR11 BIT11 +#define LPFC_SLI4_INTR12 BIT12 +#define LPFC_SLI4_INTR13 BIT13 +#define LPFC_SLI4_INTR14 BIT14 +#define LPFC_SLI4_INTR15 BIT15 +#define LPFC_SLI4_INTR16 BIT16 +#define LPFC_SLI4_INTR17 BIT17 +#define LPFC_SLI4_INTR18 BIT18 +#define LPFC_SLI4_INTR19 BIT19 +#define LPFC_SLI4_INTR20 BIT20 +#define LPFC_SLI4_INTR21 BIT21 +#define LPFC_SLI4_INTR22 BIT22 +#define LPFC_SLI4_INTR23 BIT23 +#define LPFC_SLI4_INTR24 BIT24 +#define LPFC_SLI4_INTR25 BIT25 +#define LPFC_SLI4_INTR26 BIT26 +#define LPFC_SLI4_INTR27 BIT27 +#define LPFC_SLI4_INTR28 BIT28 +#define LPFC_SLI4_INTR29 BIT29 +#define LPFC_SLI4_INTR30 BIT30 +#define LPFC_SLI4_INTR31 BIT31 + +/* BAR2 Registers */ +#define LPFC_RQ_DOORBELL 0x00A0 +#define lpfc_rq_doorbell_num_posted_SHIFT 16 +#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF +#define lpfc_rq_doorbell_num_posted_WORD word0 +#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ +#define lpfc_rq_doorbell_id_SHIFT 0 +#define lpfc_rq_doorbell_id_MASK 0x03FF +#define lpfc_rq_doorbell_id_WORD word0 + +#define LPFC_WQ_DOORBELL 0x0040 +#define lpfc_wq_doorbell_num_posted_SHIFT 24 +#define lpfc_wq_doorbell_num_posted_MASK 0x00FF +#define lpfc_wq_doorbell_num_posted_WORD word0 +#define lpfc_wq_doorbell_index_SHIFT 16 +#define lpfc_wq_doorbell_index_MASK 0x00FF +#define lpfc_wq_doorbell_index_WORD word0 +#define lpfc_wq_doorbell_id_SHIFT 0 +#define lpfc_wq_doorbell_id_MASK 0xFFFF +#define lpfc_wq_doorbell_id_WORD word0 + +#define LPFC_EQCQ_DOORBELL 0x0120 +#define lpfc_eqcq_doorbell_arm_SHIFT 29 +#define lpfc_eqcq_doorbell_arm_MASK 0x0001 +#define lpfc_eqcq_doorbell_arm_WORD word0 +#define lpfc_eqcq_doorbell_num_released_SHIFT 16 +#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF +#define 
lpfc_eqcq_doorbell_num_released_WORD word0 +#define lpfc_eqcq_doorbell_qt_SHIFT 10 +#define lpfc_eqcq_doorbell_qt_MASK 0x0001 +#define lpfc_eqcq_doorbell_qt_WORD word0 +#define LPFC_QUEUE_TYPE_COMPLETION 0 +#define LPFC_QUEUE_TYPE_EVENT 1 +#define lpfc_eqcq_doorbell_eqci_SHIFT 9 +#define lpfc_eqcq_doorbell_eqci_MASK 0x0001 +#define lpfc_eqcq_doorbell_eqci_WORD word0 +#define lpfc_eqcq_doorbell_cqid_SHIFT 0 +#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF +#define lpfc_eqcq_doorbell_cqid_WORD word0 +#define lpfc_eqcq_doorbell_eqid_SHIFT 0 +#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF +#define lpfc_eqcq_doorbell_eqid_WORD word0 + +#define LPFC_BMBX 0x0160 +#define lpfc_bmbx_addr_SHIFT 2 +#define lpfc_bmbx_addr_MASK 0x3FFFFFFF +#define lpfc_bmbx_addr_WORD word0 +#define lpfc_bmbx_hi_SHIFT 1 +#define lpfc_bmbx_hi_MASK 0x0001 +#define lpfc_bmbx_hi_WORD word0 +#define lpfc_bmbx_rdy_SHIFT 0 +#define lpfc_bmbx_rdy_MASK 0x0001 +#define lpfc_bmbx_rdy_WORD word0 + +#define LPFC_MQ_DOORBELL 0x0140 +#define lpfc_mq_doorbell_num_posted_SHIFT 16 +#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF +#define lpfc_mq_doorbell_num_posted_WORD word0 +#define lpfc_mq_doorbell_id_SHIFT 0 +#define lpfc_mq_doorbell_id_MASK 0x03FF +#define lpfc_mq_doorbell_id_WORD word0 + +struct lpfc_sli4_cfg_mhdr { + uint32_t word1; +#define lpfc_mbox_hdr_emb_SHIFT 0 +#define lpfc_mbox_hdr_emb_MASK 0x00000001 +#define lpfc_mbox_hdr_emb_WORD word1 +#define lpfc_mbox_hdr_sge_cnt_SHIFT 3 +#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F +#define lpfc_mbox_hdr_sge_cnt_WORD word1 + uint32_t payload_length; + uint32_t tag_lo; + uint32_t tag_hi; + uint32_t reserved5; +}; + +union lpfc_sli4_cfg_shdr { + struct { + uint32_t word6; +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_port_number_SHIFT 16 +#define lpfc_mbox_hdr_port_number_MASK 0x000000FF +#define lpfc_mbox_hdr_port_number_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 + uint32_t timeout; + uint32_t request_length; + uint32_t reserved9; + } request; + struct { + uint32_t word6; +#define lpfc_mbox_hdr_opcode_SHIFT 0 +#define lpfc_mbox_hdr_opcode_MASK 0x000000FF +#define lpfc_mbox_hdr_opcode_WORD word6 +#define lpfc_mbox_hdr_subsystem_SHIFT 8 +#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF +#define lpfc_mbox_hdr_subsystem_WORD word6 +#define lpfc_mbox_hdr_domain_SHIFT 24 +#define lpfc_mbox_hdr_domain_MASK 0x000000FF +#define lpfc_mbox_hdr_domain_WORD word6 + uint32_t word7; +#define lpfc_mbox_hdr_status_SHIFT 0 +#define lpfc_mbox_hdr_status_MASK 0x000000FF +#define lpfc_mbox_hdr_status_WORD word7 +#define lpfc_mbox_hdr_add_status_SHIFT 8 +#define lpfc_mbox_hdr_add_status_MASK 0x000000FF +#define lpfc_mbox_hdr_add_status_WORD word7 + uint32_t response_length; + uint32_t actual_response_length; + } response; +}; + +/* Mailbox structures */ +struct mbox_header { + struct lpfc_sli4_cfg_mhdr cfg_mhdr; + union lpfc_sli4_cfg_shdr cfg_shdr; +}; + +/* Subsystem Definitions */ +#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 +#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC + +/* Device Specific Definitions */ + +/* The HOST ENDIAN defines are in Big Endian format. 
*/ +#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF +#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF + +/* Common Opcodes */ +#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C +#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D +#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 +#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 +#define LPFC_MBOX_OPCODE_NOP 0x21 +#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 +#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 +#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 +#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D + +/* FCoE Opcodes */ +#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 +#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02 +#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03 +#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04 +#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05 +#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06 +#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08 +#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 +#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A +#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B + +/* Mailbox command structures */ +struct eq_context { + uint32_t word0; +#define lpfc_eq_context_size_SHIFT 31 +#define lpfc_eq_context_size_MASK 0x00000001 +#define lpfc_eq_context_size_WORD word0 +#define LPFC_EQE_SIZE_4 0x0 +#define LPFC_EQE_SIZE_16 0x1 +#define lpfc_eq_context_valid_SHIFT 29 +#define lpfc_eq_context_valid_MASK 0x00000001 +#define lpfc_eq_context_valid_WORD word0 + uint32_t word1; +#define lpfc_eq_context_count_SHIFT 26 +#define lpfc_eq_context_count_MASK 0x00000003 +#define lpfc_eq_context_count_WORD word1 +#define LPFC_EQ_CNT_256 0x0 +#define LPFC_EQ_CNT_512 0x1 +#define LPFC_EQ_CNT_1024 0x2 +#define LPFC_EQ_CNT_2048 0x3 +#define LPFC_EQ_CNT_4096 0x4 + uint32_t word2; +#define lpfc_eq_context_delay_multi_SHIFT 13 +#define lpfc_eq_context_delay_multi_MASK 0x000003FF +#define lpfc_eq_context_delay_multi_WORD word2 + uint32_t reserved3; +}; + +struct sgl_page_pairs { + uint32_t sgl_pg0_addr_lo; + uint32_t sgl_pg0_addr_hi; + uint32_t sgl_pg1_addr_lo; + uint32_t sgl_pg1_addr_hi; +}; + +struct lpfc_mbx_post_sgl_pages { + struct mbox_header header; + uint32_t word0; +#define lpfc_post_sgl_pages_xri_SHIFT 0 +#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF +#define lpfc_post_sgl_pages_xri_WORD word0 +#define lpfc_post_sgl_pages_xricnt_SHIFT 16 +#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF +#define lpfc_post_sgl_pages_xricnt_WORD word0 + struct sgl_page_pairs sgl_pg_pairs[1]; +}; + +/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */ +struct lpfc_mbx_post_uembed_sgl_page1 { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word0; + struct sgl_page_pairs sgl_pg_pairs; +}; + +struct lpfc_mbx_sge { + uint32_t pa_lo; + uint32_t pa_hi; + uint32_t length; +}; + +struct lpfc_mbx_nembed_cmd { + struct lpfc_sli4_cfg_mhdr cfg_mhdr; +#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19 + struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES]; +}; + +struct lpfc_mbx_nembed_sge_virt { + void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES]; +}; + +struct lpfc_mbx_eq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_eq_create_num_pages_SHIFT 0 +#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_eq_create_num_pages_WORD word0 + struct eq_context context; + struct dma_address page[LPFC_MAX_EQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_eq_create_q_id_SHIFT 0 +#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_eq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_eq_destroy { + struct 
mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_eq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_eq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct lpfc_mbx_nop { + struct mbox_header header; + uint32_t context[2]; +}; + +struct cq_context { + uint32_t word0; +#define lpfc_cq_context_event_SHIFT 31 +#define lpfc_cq_context_event_MASK 0x00000001 +#define lpfc_cq_context_event_WORD word0 +#define lpfc_cq_context_valid_SHIFT 29 +#define lpfc_cq_context_valid_MASK 0x00000001 +#define lpfc_cq_context_valid_WORD word0 +#define lpfc_cq_context_count_SHIFT 27 +#define lpfc_cq_context_count_MASK 0x00000003 +#define lpfc_cq_context_count_WORD word0 +#define LPFC_CQ_CNT_256 0x0 +#define LPFC_CQ_CNT_512 0x1 +#define LPFC_CQ_CNT_1024 0x2 + uint32_t word1; +#define lpfc_cq_eq_id_SHIFT 22 +#define lpfc_cq_eq_id_MASK 0x000000FF +#define lpfc_cq_eq_id_WORD word1 + uint32_t reserved0; + uint32_t reserved1; +}; + +struct lpfc_mbx_cq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_num_pages_SHIFT 0 +#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_num_pages_WORD word0 + struct cq_context context; + struct dma_address page[LPFC_MAX_CQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_cq_create_q_id_SHIFT 0 +#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_cq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_cq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_cq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct wq_context { + uint32_t reserved0; + uint32_t reserved1; + uint32_t reserved2; + uint32_t reserved3; +}; + +struct lpfc_mbx_wq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_wq_create_num_pages_SHIFT 0 +#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_num_pages_WORD word0 +#define lpfc_mbx_wq_create_cq_id_SHIFT 16 +#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_cq_id_WORD word0 + struct dma_address page[LPFC_MAX_WQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_wq_create_q_id_SHIFT 0 +#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_wq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_wq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_wq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +#define LPFC_HDR_BUF_SIZE 128 +#define LPFC_DATA_BUF_SIZE 4096 +struct rq_context { + uint32_t word0; +#define lpfc_rq_context_rq_size_SHIFT 16 +#define lpfc_rq_context_rq_size_MASK 0x0000000F +#define lpfc_rq_context_rq_size_WORD word0 +#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ +#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ +#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ +#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ + uint32_t reserved1; + uint32_t word2; +#define lpfc_rq_context_cq_id_SHIFT 16 +#define lpfc_rq_context_cq_id_MASK 0x000003FF +#define lpfc_rq_context_cq_id_WORD word2 +#define 
lpfc_rq_context_buf_size_SHIFT 0 +#define lpfc_rq_context_buf_size_MASK 0x0000FFFF +#define lpfc_rq_context_buf_size_WORD word2 + uint32_t reserved3; +}; + +struct lpfc_mbx_rq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_num_pages_SHIFT 0 +#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_num_pages_WORD word0 + struct rq_context context; + struct dma_address page[LPFC_MAX_WQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_rq_create_q_id_SHIFT 0 +#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_rq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_rq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_rq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct mq_context { + uint32_t word0; +#define lpfc_mq_context_cq_id_SHIFT 22 +#define lpfc_mq_context_cq_id_MASK 0x000003FF +#define lpfc_mq_context_cq_id_WORD word0 +#define lpfc_mq_context_count_SHIFT 16 +#define lpfc_mq_context_count_MASK 0x0000000F +#define lpfc_mq_context_count_WORD word0 +#define LPFC_MQ_CNT_16 0x5 +#define LPFC_MQ_CNT_32 0x6 +#define LPFC_MQ_CNT_64 0x7 +#define LPFC_MQ_CNT_128 0x8 + uint32_t word1; +#define lpfc_mq_context_valid_SHIFT 31 +#define lpfc_mq_context_valid_MASK 0x00000001 +#define lpfc_mq_context_valid_WORD word1 + uint32_t reserved2; + uint32_t reserved3; +}; + +struct lpfc_mbx_mq_create { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_num_pages_SHIFT 0 +#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_num_pages_WORD word0 + struct mq_context context; + struct dma_address page[LPFC_MAX_MQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_q_id_SHIFT 0 +#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_q_id_WORD word0 + } response; + } u; +}; + +struct lpfc_mbx_mq_destroy { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_destroy_q_id_SHIFT 0 +#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_destroy_q_id_WORD word0 + } request; + struct { + uint32_t word0; + } response; + } u; +}; + +struct lpfc_mbx_post_hdr_tmpl { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0 +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF +#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10 +#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16 +#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF +#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10 + uint32_t rpi_paddr_lo; + uint32_t rpi_paddr_hi; +}; + +struct sli4_sge { /* SLI-4 */ + uint32_t addr_hi; + uint32_t addr_lo; + + uint32_t word2; +#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/ +#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF +#define lpfc_sli4_sge_offset_WORD word2 +#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets + this flag !! 
*/ +#define lpfc_sli4_sge_last_MASK 0x00000001 +#define lpfc_sli4_sge_last_WORD word2 + uint32_t word3; +#define lpfc_sli4_sge_len_SHIFT 0 +#define lpfc_sli4_sge_len_MASK 0x0001FFFF +#define lpfc_sli4_sge_len_WORD word3 +}; + +struct fcf_record { + uint32_t max_rcv_size; + uint32_t fka_adv_period; + uint32_t fip_priority; + uint32_t word3; +#define lpfc_fcf_record_mac_0_SHIFT 0 +#define lpfc_fcf_record_mac_0_MASK 0x000000FF +#define lpfc_fcf_record_mac_0_WORD word3 +#define lpfc_fcf_record_mac_1_SHIFT 8 +#define lpfc_fcf_record_mac_1_MASK 0x000000FF +#define lpfc_fcf_record_mac_1_WORD word3 +#define lpfc_fcf_record_mac_2_SHIFT 16 +#define lpfc_fcf_record_mac_2_MASK 0x000000FF +#define lpfc_fcf_record_mac_2_WORD word3 +#define lpfc_fcf_record_mac_3_SHIFT 24 +#define lpfc_fcf_record_mac_3_MASK 0x000000FF +#define lpfc_fcf_record_mac_3_WORD word3 + uint32_t word4; +#define lpfc_fcf_record_mac_4_SHIFT 0 +#define lpfc_fcf_record_mac_4_MASK 0x000000FF +#define lpfc_fcf_record_mac_4_WORD word4 +#define lpfc_fcf_record_mac_5_SHIFT 8 +#define lpfc_fcf_record_mac_5_MASK 0x000000FF +#define lpfc_fcf_record_mac_5_WORD word4 +#define lpfc_fcf_record_fcf_avail_SHIFT 16 +#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF +#define lpfc_fcf_record_fcf_avail_WORD word4 +#define lpfc_fcf_record_mac_addr_prov_SHIFT 24 +#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF +#define lpfc_fcf_record_mac_addr_prov_WORD word4 +#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */ +#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */ + uint32_t word5; +#define lpfc_fcf_record_fab_name_0_SHIFT 0 +#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_0_WORD word5 +#define lpfc_fcf_record_fab_name_1_SHIFT 8 +#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_1_WORD word5 +#define lpfc_fcf_record_fab_name_2_SHIFT 16 +#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_2_WORD word5 +#define lpfc_fcf_record_fab_name_3_SHIFT 24 +#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_3_WORD word5 + uint32_t word6; +#define lpfc_fcf_record_fab_name_4_SHIFT 0 +#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_4_WORD word6 +#define lpfc_fcf_record_fab_name_5_SHIFT 8 +#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_5_WORD word6 +#define lpfc_fcf_record_fab_name_6_SHIFT 16 +#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_6_WORD word6 +#define lpfc_fcf_record_fab_name_7_SHIFT 24 +#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF +#define lpfc_fcf_record_fab_name_7_WORD word6 + uint32_t word7; +#define lpfc_fcf_record_fc_map_0_SHIFT 0 +#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_0_WORD word7 +#define lpfc_fcf_record_fc_map_1_SHIFT 8 +#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_1_WORD word7 +#define lpfc_fcf_record_fc_map_2_SHIFT 16 +#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF +#define lpfc_fcf_record_fc_map_2_WORD word7 +#define lpfc_fcf_record_fcf_valid_SHIFT 24 +#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF +#define lpfc_fcf_record_fcf_valid_WORD word7 + uint32_t word8; +#define lpfc_fcf_record_fcf_index_SHIFT 0 +#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF +#define lpfc_fcf_record_fcf_index_WORD word8 +#define lpfc_fcf_record_fcf_state_SHIFT 16 +#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF +#define lpfc_fcf_record_fcf_state_WORD word8 + uint8_t vlan_bitmap[512]; +};
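
A READ_FCF_TABLE response carries one fcf_record per fabric FCF, with the FCF MAC address spread across the byte-wide mac_0..mac_5 fields of word3 and word4. Consumers reassemble it field by field; a sketch, assuming bf_get()-style accessors as outlined earlier (the helper function name here is illustrative):

/* Illustrative only: rebuild the FCF MAC address from the byte-wide
 * lpfc_fcf_record_mac_0..mac_5 fields defined above.
 */
static void example_fcf_record_mac(struct fcf_record *fcf, uint8_t mac[6])
{
	mac[0] = bf_get(lpfc_fcf_record_mac_0, fcf);
	mac[1] = bf_get(lpfc_fcf_record_mac_1, fcf);
	mac[2] = bf_get(lpfc_fcf_record_mac_2, fcf);
	mac[3] = bf_get(lpfc_fcf_record_mac_3, fcf);
	mac[4] = bf_get(lpfc_fcf_record_mac_4, fcf);
	mac[5] = bf_get(lpfc_fcf_record_mac_5, fcf);
}
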
+ +struct lpfc_mbx_read_fcf_tbl { + union lpfc_sli4_cfg_shdr cfg_shdr; + union { + struct { + uint32_t word10; +#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0 +#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF +#define lpfc_mbx_read_fcf_tbl_indx_WORD word10 + } request; + struct { + uint32_t eventag; + } response; + } u; + uint32_t word11; +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0 +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF +#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11 +}; + +struct lpfc_mbx_add_fcf_tbl_entry { + union lpfc_sli4_cfg_shdr cfg_shdr; + uint32_t word10; +#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0 +#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF +#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10 + struct lpfc_mbx_sge fcf_sge; +}; + +struct lpfc_mbx_del_fcf_tbl_entry { + struct mbox_header header; + uint32_t word10; +#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0 +#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF +#define lpfc_mbx_del_fcf_tbl_count_WORD word10 +#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16 +#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF +#define lpfc_mbx_del_fcf_tbl_index_WORD word10 +}; + +/* Status field for embedded SLI_CONFIG mailbox command */ +#define STATUS_SUCCESS 0x0 +#define STATUS_FAILED 0x1 +#define STATUS_ILLEGAL_REQUEST 0x2 +#define STATUS_ILLEGAL_FIELD 0x3 +#define STATUS_INSUFFICIENT_BUFFER 0x4 +#define STATUS_UNAUTHORIZED_REQUEST 0x5 +#define STATUS_FLASHROM_SAVE_FAILED 0x17 +#define STATUS_FLASHROM_RESTORE_FAILED 0x18 +#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a +#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b +#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c +#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d +#define STATUS_ASSERT_FAILED 0x1e +#define STATUS_INVALID_SESSION 0x1f +#define STATUS_INVALID_CONNECTION 0x20 +#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21 +#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24 +#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25 +#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26 +#define STATUS_FLASHROM_READ_FAILED 0x27 +#define STATUS_POLL_IOCTL_TIMEOUT 0x28 +#define STATUS_ERROR_ACITMAIN 0x2a +#define STATUS_REBOOT_REQUIRED 0x2c +#define STATUS_FCF_IN_USE 0x3a + +struct lpfc_mbx_sli4_config { + struct mbox_header header; +}; + +struct lpfc_mbx_init_vfi { + uint32_t word1; +#define lpfc_init_vfi_vr_SHIFT 31 +#define lpfc_init_vfi_vr_MASK 0x00000001 +#define lpfc_init_vfi_vr_WORD word1 +#define lpfc_init_vfi_vt_SHIFT 30 +#define lpfc_init_vfi_vt_MASK 0x00000001 +#define lpfc_init_vfi_vt_WORD word1 +#define lpfc_init_vfi_vf_SHIFT 29 +#define lpfc_init_vfi_vf_MASK 0x00000001 +#define lpfc_init_vfi_vf_WORD word1 +#define lpfc_init_vfi_vfi_SHIFT 0 +#define lpfc_init_vfi_vfi_MASK 0x0000FFFF +#define lpfc_init_vfi_vfi_WORD word1 + uint32_t word2; +#define lpfc_init_vfi_fcfi_SHIFT 0 +#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF +#define lpfc_init_vfi_fcfi_WORD word2 + uint32_t word3; +#define lpfc_init_vfi_pri_SHIFT 13 +#define lpfc_init_vfi_pri_MASK 0x00000007 +#define lpfc_init_vfi_pri_WORD word3 +#define lpfc_init_vfi_vf_id_SHIFT 1 +#define lpfc_init_vfi_vf_id_MASK 0x00000FFF +#define lpfc_init_vfi_vf_id_WORD word3 + uint32_t word4; +#define lpfc_init_vfi_hop_count_SHIFT 24 +#define lpfc_init_vfi_hop_count_MASK 0x000000FF +#define lpfc_init_vfi_hop_count_WORD word4 +}; + +struct lpfc_mbx_reg_vfi { + uint32_t word1; +#define lpfc_reg_vfi_vp_SHIFT 28 +#define lpfc_reg_vfi_vp_MASK 0x00000001 +#define lpfc_reg_vfi_vp_WORD word1 +#define 
lpfc_reg_vfi_vfi_SHIFT 0 +#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF +#define lpfc_reg_vfi_vfi_WORD word1 + uint32_t word2; +#define lpfc_reg_vfi_vpi_SHIFT 16 +#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF +#define lpfc_reg_vfi_vpi_WORD word2 +#define lpfc_reg_vfi_fcfi_SHIFT 0 +#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF +#define lpfc_reg_vfi_fcfi_WORD word2 + uint32_t word3_rsvd; + uint32_t word4_rsvd; + struct ulp_bde64 bde; + uint32_t word8_rsvd; + uint32_t word9_rsvd; + uint32_t word10; +#define lpfc_reg_vfi_nport_id_SHIFT 0 +#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF +#define lpfc_reg_vfi_nport_id_WORD word10 +}; + +struct lpfc_mbx_init_vpi { + uint32_t word1; +#define lpfc_init_vpi_vfi_SHIFT 16 +#define lpfc_init_vpi_vfi_MASK 0x0000FFFF +#define lpfc_init_vpi_vfi_WORD word1 +#define lpfc_init_vpi_vpi_SHIFT 0 +#define lpfc_init_vpi_vpi_MASK 0x0000FFFF +#define lpfc_init_vpi_vpi_WORD word1 +}; + +struct lpfc_mbx_read_vpi { + uint32_t word1_rsvd; + uint32_t word2; +#define lpfc_mbx_read_vpi_vnportid_SHIFT 0 +#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF +#define lpfc_mbx_read_vpi_vnportid_WORD word2 + uint32_t word3_rsvd; + uint32_t word4; +#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0 +#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF +#define lpfc_mbx_read_vpi_acq_alpa_WORD word4 +#define lpfc_mbx_read_vpi_pb_SHIFT 15 +#define lpfc_mbx_read_vpi_pb_MASK 0x00000001 +#define lpfc_mbx_read_vpi_pb_WORD word4 +#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16 +#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF +#define lpfc_mbx_read_vpi_spec_alpa_WORD word4 +#define lpfc_mbx_read_vpi_ns_SHIFT 30 +#define lpfc_mbx_read_vpi_ns_MASK 0x00000001 +#define lpfc_mbx_read_vpi_ns_WORD word4 +#define lpfc_mbx_read_vpi_hl_SHIFT 31 +#define lpfc_mbx_read_vpi_hl_MASK 0x00000001 +#define lpfc_mbx_read_vpi_hl_WORD word4 + uint32_t word5_rsvd; + uint32_t word6; +#define lpfc_mbx_read_vpi_vpi_SHIFT 0 +#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF +#define lpfc_mbx_read_vpi_vpi_WORD word6 + uint32_t word7; +#define lpfc_mbx_read_vpi_mac_0_SHIFT 0 +#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_0_WORD word7 +#define lpfc_mbx_read_vpi_mac_1_SHIFT 8 +#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_1_WORD word7 +#define lpfc_mbx_read_vpi_mac_2_SHIFT 16 +#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_2_WORD word7 +#define lpfc_mbx_read_vpi_mac_3_SHIFT 24 +#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_3_WORD word7 + uint32_t word8; +#define lpfc_mbx_read_vpi_mac_4_SHIFT 0 +#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_4_WORD word8 +#define lpfc_mbx_read_vpi_mac_5_SHIFT 8 +#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF +#define lpfc_mbx_read_vpi_mac_5_WORD word8 +#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16 +#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF +#define lpfc_mbx_read_vpi_vlan_tag_WORD word8 +#define lpfc_mbx_read_vpi_vv_SHIFT 28 +#define lpfc_mbx_read_vpi_vv_MASK 0x0000001 +#define lpfc_mbx_read_vpi_vv_WORD word8 +}; + +struct lpfc_mbx_unreg_vfi { + uint32_t word1_rsvd; + uint32_t word2; +#define lpfc_unreg_vfi_vfi_SHIFT 0 +#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF +#define lpfc_unreg_vfi_vfi_WORD word2 +}; + +struct lpfc_mbx_resume_rpi { + uint32_t word1; +#define lpfc_resume_rpi_rpi_SHIFT 0 +#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF +#define lpfc_resume_rpi_rpi_WORD word1 + uint32_t event_tag; + uint32_t word3_rsvd; + uint32_t word4_rsvd; + 
uint32_t word5_rsvd; + uint32_t word6; +#define lpfc_resume_rpi_vpi_SHIFT 0 +#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF +#define lpfc_resume_rpi_vpi_WORD word6 +#define lpfc_resume_rpi_vfi_SHIFT 16 +#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF +#define lpfc_resume_rpi_vfi_WORD word6 +}; + +#define REG_FCF_INVALID_QID 0xFFFF +struct lpfc_mbx_reg_fcfi { + uint32_t word1; +#define lpfc_reg_fcfi_info_index_SHIFT 0 +#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF +#define lpfc_reg_fcfi_info_index_WORD word1 +#define lpfc_reg_fcfi_fcfi_SHIFT 16 +#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF +#define lpfc_reg_fcfi_fcfi_WORD word1 + uint32_t word2; +#define lpfc_reg_fcfi_rq_id1_SHIFT 0 +#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id1_WORD word2 +#define lpfc_reg_fcfi_rq_id0_SHIFT 16 +#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id0_WORD word2 + uint32_t word3; +#define lpfc_reg_fcfi_rq_id3_SHIFT 0 +#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id3_WORD word3 +#define lpfc_reg_fcfi_rq_id2_SHIFT 16 +#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF +#define lpfc_reg_fcfi_rq_id2_WORD word3 + uint32_t word4; +#define lpfc_reg_fcfi_type_match0_SHIFT 24 +#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match0_WORD word4 +#define lpfc_reg_fcfi_type_mask0_SHIFT 16 +#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask0_WORD word4 +#define lpfc_reg_fcfi_rctl_match0_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match0_WORD word4 +#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask0_WORD word4 + uint32_t word5; +#define lpfc_reg_fcfi_type_match1_SHIFT 24 +#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match1_WORD word5 +#define lpfc_reg_fcfi_type_mask1_SHIFT 16 +#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask1_WORD word5 +#define lpfc_reg_fcfi_rctl_match1_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match1_WORD word5 +#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask1_WORD word5 + uint32_t word6; +#define lpfc_reg_fcfi_type_match2_SHIFT 24 +#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match2_WORD word6 +#define lpfc_reg_fcfi_type_mask2_SHIFT 16 +#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask2_WORD word6 +#define lpfc_reg_fcfi_rctl_match2_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match2_WORD word6 +#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask2_WORD word6 + uint32_t word7; +#define lpfc_reg_fcfi_type_match3_SHIFT 24 +#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_type_match3_WORD word7 +#define lpfc_reg_fcfi_type_mask3_SHIFT 16 +#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_type_mask3_WORD word7 +#define lpfc_reg_fcfi_rctl_match3_SHIFT 8 +#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_match3_WORD word7 +#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0 +#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF +#define lpfc_reg_fcfi_rctl_mask3_WORD word7 + uint32_t word8; +#define lpfc_reg_fcfi_mam_SHIFT 13 +#define lpfc_reg_fcfi_mam_MASK 
0x00000003 +#define lpfc_reg_fcfi_mam_WORD word8 +#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */ +#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */ +#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */ +#define lpfc_reg_fcfi_vv_SHIFT 12 +#define lpfc_reg_fcfi_vv_MASK 0x00000001 +#define lpfc_reg_fcfi_vv_WORD word8 +#define lpfc_reg_fcfi_vlan_tag_SHIFT 0 +#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF +#define lpfc_reg_fcfi_vlan_tag_WORD word8 +}; + +struct lpfc_mbx_unreg_fcfi { + uint32_t word1_rsv; + uint32_t word2; +#define lpfc_unreg_fcfi_SHIFT 0 +#define lpfc_unreg_fcfi_MASK 0x0000FFFF +#define lpfc_unreg_fcfi_WORD word2 +}; + +struct lpfc_mbx_read_rev { + uint32_t word1; +#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16 +#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F +#define lpfc_mbx_rd_rev_sli_lvl_WORD word1 +#define lpfc_mbx_rd_rev_fcoe_SHIFT 20 +#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 +#define lpfc_mbx_rd_rev_fcoe_WORD word1 +#define lpfc_mbx_rd_rev_vpd_SHIFT 29 +#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 +#define lpfc_mbx_rd_rev_vpd_WORD word1 + uint32_t first_hw_rev; + uint32_t second_hw_rev; + uint32_t word4_rsvd; + uint32_t third_hw_rev; + uint32_t word6; +#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0 +#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF +#define lpfc_mbx_rd_rev_fcph_low_WORD word6 +#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8 +#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF +#define lpfc_mbx_rd_rev_fcph_high_WORD word6 +#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16 +#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF +#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6 +#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24 +#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF +#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6 + uint32_t word7_rsvd; + uint32_t fw_id_rev; + uint8_t fw_name[16]; + uint32_t ulp_fw_id_rev; + uint8_t ulp_fw_name[16]; + uint32_t word18_47_rsvd[30]; + uint32_t word48; +#define lpfc_mbx_rd_rev_avail_len_SHIFT 0 +#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF +#define lpfc_mbx_rd_rev_avail_len_WORD word48 + uint32_t vpd_paddr_low; + uint32_t vpd_paddr_high; + uint32_t avail_vpd_len; + uint32_t rsvd_52_63[12]; +}; + +struct lpfc_mbx_read_config { + uint32_t word1; +#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0 +#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF +#define lpfc_mbx_rd_conf_max_bbc_WORD word1 +#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8 +#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF +#define lpfc_mbx_rd_conf_init_bbc_WORD word1 + uint32_t word2; +#define lpfc_mbx_rd_conf_nport_did_SHIFT 0 +#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF +#define lpfc_mbx_rd_conf_nport_did_WORD word2 +#define lpfc_mbx_rd_conf_topology_SHIFT 24 +#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF +#define lpfc_mbx_rd_conf_topology_WORD word2 + uint32_t word3; +#define lpfc_mbx_rd_conf_ao_SHIFT 0 +#define lpfc_mbx_rd_conf_ao_MASK 0x00000001 +#define lpfc_mbx_rd_conf_ao_WORD word3 +#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8 +#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F +#define lpfc_mbx_rd_conf_bb_scn_WORD word3 +#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12 +#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F +#define lpfc_mbx_rd_conf_cbb_scn_WORD word3 +#define lpfc_mbx_rd_conf_mc_SHIFT 29 +#define lpfc_mbx_rd_conf_mc_MASK 0x00000001 +#define lpfc_mbx_rd_conf_mc_WORD word3 + uint32_t word4; +#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_e_d_tov_WORD word4 + uint32_t word5; +#define 
lpfc_mbx_rd_conf_lp_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_lp_tov_WORD word5 + uint32_t word6; +#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 + uint32_t word7; +#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF +#define lpfc_mbx_rd_conf_r_t_tov_WORD word7 + uint32_t word8; +#define lpfc_mbx_rd_conf_al_tov_SHIFT 0 +#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F +#define lpfc_mbx_rd_conf_al_tov_WORD word8 + uint32_t word9; +#define lpfc_mbx_rd_conf_lmt_SHIFT 0 +#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_lmt_WORD word9 + uint32_t word10; +#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0 +#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF +#define lpfc_mbx_rd_conf_max_alpa_WORD word10 + uint32_t word11_rsvd; + uint32_t word12; +#define lpfc_mbx_rd_conf_xri_base_SHIFT 0 +#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_xri_base_WORD word12 +#define lpfc_mbx_rd_conf_xri_count_SHIFT 16 +#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_xri_count_WORD word12 + uint32_t word13; +#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rpi_base_WORD word13 +#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rpi_count_WORD word13 + uint32_t word14; +#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vpi_base_WORD word14 +#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vpi_count_WORD word14 + uint32_t word15; +#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vfi_base_WORD word15 +#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_vfi_count_WORD word15 + uint32_t word16; +#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0 +#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_fcfi_base_WORD word16 +#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 +#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_fcfi_count_WORD word16 + uint32_t word17; +#define lpfc_mbx_rd_conf_rq_count_SHIFT 0 +#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_rq_count_WORD word17 +#define lpfc_mbx_rd_conf_eq_count_SHIFT 16 +#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_eq_count_WORD word17 + uint32_t word18; +#define lpfc_mbx_rd_conf_wq_count_SHIFT 0 +#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_wq_count_WORD word18 +#define lpfc_mbx_rd_conf_cq_count_SHIFT 16 +#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF +#define lpfc_mbx_rd_conf_cq_count_WORD word18 +}; + +struct lpfc_mbx_request_features { + uint32_t word1; +#define lpfc_mbx_rq_ftr_qry_SHIFT 0 +#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_qry_WORD word1 + uint32_t word2; +#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0 +#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2 +#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1 +#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2 +#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 
2 +#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_dif_WORD word2 +#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3 +#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_vf_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4 +#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5 +#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2 +#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6 +#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2 +#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 +#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 + uint32_t word3; +#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 +#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1 +#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2 +#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3 +#define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4 +#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5 +#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6 +#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 +#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 +}; + +/* Mailbox Completion Queue Error Messages */ +#define MB_CQE_STATUS_SUCCESS 0x0 +#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 +#define MB_CQE_STATUS_INVALID_PARAMETER 0x2 +#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3 +#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 +#define MB_CQE_STATUS_DMA_FAILED 0x5 + +/* mailbox queue entry structure */ +struct lpfc_mqe { + uint32_t word0; +#define lpfc_mqe_status_SHIFT 16 +#define lpfc_mqe_status_MASK 0x0000FFFF +#define lpfc_mqe_status_WORD word0 +#define lpfc_mqe_command_SHIFT 8 +#define lpfc_mqe_command_MASK 0x000000FF +#define lpfc_mqe_command_WORD word0 + union { + uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1]; + /* sli4 mailbox commands */ + struct lpfc_mbx_sli4_config sli4_config; + struct lpfc_mbx_init_vfi init_vfi; + struct lpfc_mbx_reg_vfi reg_vfi; + struct lpfc_mbx_unreg_vfi unreg_vfi; + struct lpfc_mbx_init_vpi init_vpi; + struct lpfc_mbx_resume_rpi resume_rpi; + struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; + struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; + struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; + struct lpfc_mbx_reg_fcfi reg_fcfi; + struct lpfc_mbx_unreg_fcfi unreg_fcfi; + struct lpfc_mbx_mq_create mq_create; + struct lpfc_mbx_eq_create eq_create; + struct lpfc_mbx_cq_create cq_create; + struct lpfc_mbx_wq_create wq_create; + struct lpfc_mbx_rq_create rq_create; + struct lpfc_mbx_mq_destroy mq_destroy; + struct lpfc_mbx_eq_destroy eq_destroy; + struct lpfc_mbx_cq_destroy cq_destroy; + struct lpfc_mbx_wq_destroy wq_destroy; + struct lpfc_mbx_rq_destroy rq_destroy; + struct lpfc_mbx_post_sgl_pages post_sgl_pages; + struct lpfc_mbx_nembed_cmd nembed_cmd; + struct lpfc_mbx_read_rev read_rev; + 
struct lpfc_mbx_read_vpi read_vpi; + struct lpfc_mbx_read_config rd_config; + struct lpfc_mbx_request_features req_ftrs; + struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; + struct lpfc_mbx_nop nop; + } un; +}; + +struct lpfc_mcqe { + uint32_t word0; +#define lpfc_mcqe_status_SHIFT 0 +#define lpfc_mcqe_status_MASK 0x0000FFFF +#define lpfc_mcqe_status_WORD word0 +#define lpfc_mcqe_ext_status_SHIFT 16 +#define lpfc_mcqe_ext_status_MASK 0x0000FFFF +#define lpfc_mcqe_ext_status_WORD word0 + uint32_t mcqe_tag0; + uint32_t mcqe_tag1; + uint32_t trailer; +#define lpfc_trailer_valid_SHIFT 31 +#define lpfc_trailer_valid_MASK 0x00000001 +#define lpfc_trailer_valid_WORD trailer +#define lpfc_trailer_async_SHIFT 30 +#define lpfc_trailer_async_MASK 0x00000001 +#define lpfc_trailer_async_WORD trailer +#define lpfc_trailer_hpi_SHIFT 29 +#define lpfc_trailer_hpi_MASK 0x00000001 +#define lpfc_trailer_hpi_WORD trailer +#define lpfc_trailer_completed_SHIFT 28 +#define lpfc_trailer_completed_MASK 0x00000001 +#define lpfc_trailer_completed_WORD trailer +#define lpfc_trailer_consumed_SHIFT 27 +#define lpfc_trailer_consumed_MASK 0x00000001 +#define lpfc_trailer_consumed_WORD trailer +#define lpfc_trailer_type_SHIFT 16 +#define lpfc_trailer_type_MASK 0x000000FF +#define lpfc_trailer_type_WORD trailer +#define lpfc_trailer_code_SHIFT 8 +#define lpfc_trailer_code_MASK 0x000000FF +#define lpfc_trailer_code_WORD trailer +#define LPFC_TRAILER_CODE_LINK 0x1 +#define LPFC_TRAILER_CODE_FCOE 0x2 +#define LPFC_TRAILER_CODE_DCBX 0x3 +}; + +struct lpfc_acqe_link { + uint32_t word0; +#define lpfc_acqe_link_speed_SHIFT 24 +#define lpfc_acqe_link_speed_MASK 0x000000FF +#define lpfc_acqe_link_speed_WORD word0 +#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0 +#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1 +#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2 +#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3 +#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4 +#define lpfc_acqe_link_duplex_SHIFT 16 +#define lpfc_acqe_link_duplex_MASK 0x000000FF +#define lpfc_acqe_link_duplex_WORD word0 +#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0 +#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1 +#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2 +#define lpfc_acqe_link_status_SHIFT 8 +#define lpfc_acqe_link_status_MASK 0x000000FF +#define lpfc_acqe_link_status_WORD word0 +#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0 +#define LPFC_ASYNC_LINK_STATUS_UP 0x1 +#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 +#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 +#define lpfc_acqe_link_physical_SHIFT 0 +#define lpfc_acqe_link_physical_MASK 0x000000FF +#define lpfc_acqe_link_physical_WORD word0 +#define LPFC_ASYNC_LINK_PORT_A 0x0 +#define LPFC_ASYNC_LINK_PORT_B 0x1 + uint32_t word1; +#define lpfc_acqe_link_fault_SHIFT 0 +#define lpfc_acqe_link_fault_MASK 0x000000FF +#define lpfc_acqe_link_fault_WORD word1 +#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 +#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 +#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 + uint32_t event_tag; + uint32_t trailer; +}; + +struct lpfc_acqe_fcoe { + uint32_t fcf_index; + uint32_t word1; +#define lpfc_acqe_fcoe_fcf_count_SHIFT 0 +#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF +#define lpfc_acqe_fcoe_fcf_count_WORD word1 +#define lpfc_acqe_fcoe_event_type_SHIFT 16 +#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF +#define lpfc_acqe_fcoe_event_type_WORD word1 +#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 +#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 +#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 + uint32_t event_tag; + uint32_t trailer; +}; + +struct lpfc_acqe_dcbx { + uint32_t tlv_ttl; + uint32_t 
reserved; + uint32_t event_tag; + uint32_t trailer; +}; + +/* + * Define the bootstrap mailbox (bmbx) region used to communicate + * mailbox command between the host and port. The mailbox consists + * of a payload area of 256 bytes and a completion queue of length + * 16 bytes. + */ +struct lpfc_bmbx_create { + struct lpfc_mqe mqe; + struct lpfc_mcqe mcqe; +}; + +#define SGL_ALIGN_SZ 64 +#define SGL_PAGE_SIZE 4096 +/* align SGL addr on a size boundary - adjust address up */ +#define NO_XRI ((uint16_t)-1) +struct wqe_common { + uint32_t word6; +#define wqe_xri_SHIFT 0 +#define wqe_xri_MASK 0x0000FFFF +#define wqe_xri_WORD word6 +#define wqe_ctxt_tag_SHIFT 16 +#define wqe_ctxt_tag_MASK 0x0000FFFF +#define wqe_ctxt_tag_WORD word6 + uint32_t word7; +#define wqe_ct_SHIFT 2 +#define wqe_ct_MASK 0x00000003 +#define wqe_ct_WORD word7 +#define wqe_status_SHIFT 4 +#define wqe_status_MASK 0x0000000f +#define wqe_status_WORD word7 +#define wqe_cmnd_SHIFT 8 +#define wqe_cmnd_MASK 0x000000ff +#define wqe_cmnd_WORD word7 +#define wqe_class_SHIFT 16 +#define wqe_class_MASK 0x00000007 +#define wqe_class_WORD word7 +#define wqe_pu_SHIFT 20 +#define wqe_pu_MASK 0x00000003 +#define wqe_pu_WORD word7 +#define wqe_erp_SHIFT 22 +#define wqe_erp_MASK 0x00000001 +#define wqe_erp_WORD word7 +#define wqe_lnk_SHIFT 23 +#define wqe_lnk_MASK 0x00000001 +#define wqe_lnk_WORD word7 +#define wqe_tmo_SHIFT 24 +#define wqe_tmo_MASK 0x000000ff +#define wqe_tmo_WORD word7 + uint32_t abort_tag; /* word 8 in WQE */ + uint32_t word9; +#define wqe_reqtag_SHIFT 0 +#define wqe_reqtag_MASK 0x0000FFFF +#define wqe_reqtag_WORD word9 +#define wqe_rcvoxid_SHIFT 16 +#define wqe_rcvoxid_MASK 0x0000FFFF +#define wqe_rcvoxid_WORD word9 + uint32_t word10; +#define wqe_pri_SHIFT 16 +#define wqe_pri_MASK 0x00000007 +#define wqe_pri_WORD word10 +#define wqe_pv_SHIFT 19 +#define wqe_pv_MASK 0x00000001 +#define wqe_pv_WORD word10 +#define wqe_xc_SHIFT 21 +#define wqe_xc_MASK 0x00000001 +#define wqe_xc_WORD word10 +#define wqe_ccpe_SHIFT 23 +#define wqe_ccpe_MASK 0x00000001 +#define wqe_ccpe_WORD word10 +#define wqe_ccp_SHIFT 24 +#define wqe_ccp_MASK 0x000000ff +#define wqe_ccp_WORD word10 + uint32_t word11; +#define wqe_cmd_type_SHIFT 0 +#define wqe_cmd_type_MASK 0x0000000f +#define wqe_cmd_type_WORD word11 +#define wqe_wqec_SHIFT 7 +#define wqe_wqec_MASK 0x00000001 +#define wqe_wqec_WORD word11 +#define wqe_cqid_SHIFT 16 +#define wqe_cqid_MASK 0x000003ff +#define wqe_cqid_WORD word11 +}; + +struct wqe_did { + uint32_t word5; +#define wqe_els_did_SHIFT 0 +#define wqe_els_did_MASK 0x00FFFFFF +#define wqe_els_did_WORD word5 +#define wqe_xmit_bls_ar_SHIFT 30 +#define wqe_xmit_bls_ar_MASK 0x00000001 +#define wqe_xmit_bls_ar_WORD word5 +#define wqe_xmit_bls_xo_SHIFT 31 +#define wqe_xmit_bls_xo_MASK 0x00000001 +#define wqe_xmit_bls_xo_WORD word5 +}; + +struct els_request64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t word4; +#define els_req64_sid_SHIFT 0 +#define els_req64_sid_MASK 0x00FFFFFF +#define els_req64_sid_WORD word4 +#define els_req64_sp_SHIFT 24 +#define els_req64_sp_MASK 0x00000001 +#define els_req64_sp_WORD word4 +#define els_req64_vf_SHIFT 25 +#define els_req64_vf_MASK 0x00000001 +#define els_req64_vf_WORD word4 + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t word12; +#define els_req64_vfid_SHIFT 1 +#define els_req64_vfid_MASK 0x00000FFF +#define els_req64_vfid_WORD word12 +#define els_req64_pri_SHIFT 13 +#define els_req64_pri_MASK 0x00000007 +#define els_req64_pri_WORD word12 + 
uint32_t word13; +#define els_req64_hopcnt_SHIFT 24 +#define els_req64_hopcnt_MASK 0x000000ff +#define els_req64_hopcnt_WORD word13 + uint32_t reserved[2]; +}; + +struct xmit_els_rsp64_wqe { + struct ulp_bde64 bde; + uint32_t rsvd3; + uint32_t rsvd4; + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct xmit_bls_rsp64_wqe { + uint32_t payload0; + uint32_t word1; +#define xmit_bls_rsp64_rxid_SHIFT 0 +#define xmit_bls_rsp64_rxid_MASK 0x0000ffff +#define xmit_bls_rsp64_rxid_WORD word1 +#define xmit_bls_rsp64_oxid_SHIFT 16 +#define xmit_bls_rsp64_oxid_MASK 0x0000ffff +#define xmit_bls_rsp64_oxid_WORD word1 + uint32_t word2; +#define xmit_bls_rsp64_seqcntlo_SHIFT 0 +#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcntlo_WORD word2 +#define xmit_bls_rsp64_seqcnthi_SHIFT 16 +#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff +#define xmit_bls_rsp64_seqcnthi_WORD word2 + uint32_t rsrvd3; + uint32_t rsrvd4; + struct wqe_did wqe_dest; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; +struct wqe_rctl_dfctl { + uint32_t word5; +#define wqe_si_SHIFT 2 +#define wqe_si_MASK 0x000000001 +#define wqe_si_WORD word5 +#define wqe_la_SHIFT 3 +#define wqe_la_MASK 0x000000001 +#define wqe_la_WORD word5 +#define wqe_ls_SHIFT 7 +#define wqe_ls_MASK 0x000000001 +#define wqe_ls_WORD word5 +#define wqe_dfctl_SHIFT 8 +#define wqe_dfctl_MASK 0x0000000ff +#define wqe_dfctl_WORD word5 +#define wqe_type_SHIFT 16 +#define wqe_type_MASK 0x0000000ff +#define wqe_type_WORD word5 +#define wqe_rctl_SHIFT 24 +#define wqe_rctl_MASK 0x0000000ff +#define wqe_rctl_WORD word5 +}; + +struct xmit_seq64_wqe { + struct ulp_bde64 bde; + uint32_t payload_offset; + uint32_t relative_offset; + struct wqe_rctl_dfctl wqe_ctl; + struct wqe_common wqe_com; /* words 6-11 */ + /* Note: word10 different REVISIT */ + uint32_t xmit_len; + uint32_t rsvd_12_15[3]; +}; +struct xmit_bcast64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t rsvd4; + struct wqe_rctl_dfctl wqe_ctl; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct gen_req64_wqe { + struct ulp_bde64 bde; + uint32_t command_len; + uint32_t payload_len; + struct wqe_rctl_dfctl wqe_ctl; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; +}; + +struct create_xri_wqe { + uint32_t rsrvd[5]; /* words 0-4 */ + struct wqe_did wqe_dest; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +#define T_REQUEST_TAG 3 +#define T_XRI_TAG 1 + +struct abort_cmd_wqe { + uint32_t rsrvd[3]; + uint32_t word3; +#define abort_cmd_ia_SHIFT 0 +#define abort_cmd_ia_MASK 0x000000001 +#define abort_cmd_ia_WORD word3 +#define abort_cmd_criteria_SHIFT 8 +#define abort_cmd_criteria_MASK 0x0000000ff +#define abort_cmd_criteria_WORD word3 + uint32_t rsrvd4; + uint32_t rsrvd5; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_iwrite64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; + uint32_t total_xfer_len; + uint32_t initial_xfer_len; + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + +struct fcp_iread64_wqe { + struct ulp_bde64 bde; + uint32_t payload_len; /* word 3 */ + uint32_t total_xfer_len; /* word 4 */ + uint32_t rsrvd5; /* word 5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; +
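
Every WQE variant above embeds struct wqe_common as words 6-11, so building a command mostly means filling the few type-specific words and then the shared XRI/tag/timeout fields through the accessors. A hedged sketch for an FCP read entry follows; the XRI, tag, and timeout values are invented for the example, and FCP_COMMAND is the command-type code defined just below:

/* Illustrative sketch, not the driver's actual submit path: fill the
 * type-specific and common words of an FCP read work-queue entry.
 */
static void example_prep_fcp_iread(struct fcp_iread64_wqe *wqe,
				   uint16_t xri, uint16_t iotag,
				   uint32_t xfer_len)
{
	wqe->payload_len = xfer_len;			/* word 3 */
	wqe->total_xfer_len = xfer_len;			/* word 4 */
	bf_set(wqe_xri, &wqe->wqe_com, xri);		/* exchange for this I/O */
	bf_set(wqe_reqtag, &wqe->wqe_com, iotag);	/* echoed back in the CQE */
	bf_set(wqe_tmo, &wqe->wqe_com, 30);		/* timeout in seconds */
	bf_set(wqe_cmd_type, &wqe->wqe_com, FCP_COMMAND);
}

The real driver would also initialize the leading ulp_bde64 to point at the command's scatter/gather list before posting the entry to a work queue.
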
+struct fcp_icmnd64_wqe { + struct ulp_bde64 bde; /* words 0-2 */ + uint32_t rsrvd[3]; /* words 3-5 */ + struct wqe_common wqe_com; /* words 6-11 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ +}; + + +union lpfc_wqe { + uint32_t words[16]; + struct lpfc_wqe_generic generic; + struct fcp_icmnd64_wqe fcp_icmd; + struct fcp_iread64_wqe fcp_iread; + struct fcp_iwrite64_wqe fcp_iwrite; + struct abort_cmd_wqe abort_cmd; + struct create_xri_wqe create_xri; + struct xmit_bcast64_wqe xmit_bcast64; + struct xmit_seq64_wqe xmit_sequence; + struct xmit_bls_rsp64_wqe xmit_bls_rsp; + struct xmit_els_rsp64_wqe xmit_els_rsp; + struct els_request64_wqe els_req; + struct gen_req64_wqe gen_req; +}; + +#define FCP_COMMAND 0x0 +#define FCP_COMMAND_DATA_OUT 0x1 +#define ELS_COMMAND_NON_FIP 0xC +#define ELS_COMMAND_FIP 0xD +#define OTHER_COMMAND 0x8 + diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 86d1bdcbf2d8..2f5907f92eea 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -34,8 +34,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -51,9 +53,23 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; -static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); +static int lpfc_sli4_queue_create(struct lpfc_hba *); +static void lpfc_sli4_queue_destroy(struct lpfc_hba *); +static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); +static int lpfc_setup_endian_order(struct lpfc_hba *); +static int lpfc_sli4_read_config(struct lpfc_hba *); +static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); +static void lpfc_free_sgl_list(struct lpfc_hba *); +static int lpfc_init_sgl_list(struct lpfc_hba *); +static int lpfc_init_active_sgl_array(struct lpfc_hba *); +static void lpfc_free_active_sgl(struct lpfc_hba *); +static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); +static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); +static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); +static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); +static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba) return -ENOMEM; } - mb = &pmb->mb; + mb = &pmb->u.mb; phba->link_state = LPFC_INIT_MBX_CMDS; if (lpfc_is_LC_HBA(phba->pcidev->device)) { @@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba) mb->mbxCommand, mb->mbxStatus); mb->un.varDmp.word_cnt = 0; } + /* dump mem may return a zero when finished or we got a + * mailbox error, either way we are done. 
+ */ + if (mb->un.varDmp.word_cnt == 0) + break; if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, @@ -233,7 +254,7 @@ out_free_mbox: static void lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) { - if (pmboxq->mb.mbxStatus == MBX_SUCCESS) + if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) phba->temp_sensor_support = 1; else phba->temp_sensor_support = 0; @@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /* character array used for decoding dist type. */ char dist_char[] = "nabx"; - if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { + if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return; } @@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) prg = (struct prog_id *) &prog_id_word; /* word 7 contain option rom version */ - prog_id_word = pmboxq->mb.un.varWords[7]; + prog_id_word = pmboxq->u.mb.un.varWords[7]; /* Decode the Option rom version word to a readable string */ if (prg->dist < 4) @@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } - mb = &pmb->mb; + mb = &pmb->u.mb; /* Get login parameters for NID. */ lpfc_read_sparam(phba, pmb, 0); @@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Update the fc_host data structures with new wwn. */ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + fc_host_max_npiv_vports(shost) = phba->max_vpi; /* If no serial number in VPD data, use low 6 bytes of WWNN */ /* This should be consolidated into parse_vpd ? - mr */ @@ -460,17 +482,18 @@ lpfc_config_port_post(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0352 Config MSI mailbox command " "failed, mbxCmd x%x, mbxStatus x%x\n", - pmb->mb.mbxCommand, pmb->mb.mbxStatus); + pmb->u.mb.mbxCommand, + pmb->u.mb.mbxStatus); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } + spin_lock_irq(&phba->hbalock); /* Initialize ERATT handling flag */ phba->hba_flag &= ~HBA_ERATT_HANDLED; /* Enable appropriate host interrupts */ - spin_lock_irq(&phba->hbalock); status = readl(phba->HCregaddr); status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; if (psli->num_rings > 0) @@ -571,16 +594,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; - /* Disable interrupts */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ + + if (phba->sli_rev <= LPFC_SLI_REV3) { + /* Disable interrupts */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } if (phba->pport->load_flag & FC_UNLOADING) lpfc_cleanup_discovery_resources(phba->pport); else { vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && + vports[i] != NULL; i++) lpfc_cleanup_discovery_resources(vports[i]); lpfc_destroy_vport_work_array(phba, vports); } @@ -588,7 +615,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) } /** - * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset + * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. * * This routine will do uninitialization after the HBA is reset when bring @@ -598,8 +625,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) * 0 - sucess. 
* Any other value - error. **/ -int -lpfc_hba_down_post(struct lpfc_hba *phba) +static int +lpfc_hba_down_post_s3(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; @@ -642,6 +669,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba) return 0; } +/** + * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will do uninitialization after the HBA is reset when bring + * down the SLI Layer. + * + * Return codes + * 0 - sucess. + * Any other value - error. + **/ +static int +lpfc_hba_down_post_s4(struct lpfc_hba *phba) +{ + struct lpfc_scsi_buf *psb, *psb_next; + LIST_HEAD(aborts); + int ret; + unsigned long iflag = 0; + ret = lpfc_hba_down_post_s3(phba); + if (ret) + return ret; + /* At this point in time the HBA is either reset or DOA. Either + * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be + * on the lpfc_sgl_list so that it can either be freed if the + * driver is unloading or reposted if the driver is restarting + * the port. + */ + spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ + /* scsl_buf_list */ + /* abts_sgl_list_lock required because worker thread uses this + * list. + */ + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, + &phba->sli4_hba.lpfc_sgl_list); + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + /* abts_scsi_buf_list_lock required because worker thread uses this + * list. + */ + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, + &aborts); + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(psb, psb_next, &aborts, list) { + psb->pCmd = NULL; + psb->status = IOSTAT_SUCCESS; + } + spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list); + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + return 0; +} + +/** + * lpfc_hba_down_post - Wrapper func for hba down post routine + * @phba: pointer to lpfc HBA data structure. + * + * This routine wraps the actual SLI3 or SLI4 routine for performing + * uninitialization after the HBA is reset when bring down the SLI Layer. + * + * Return codes + * 0 - sucess. + * Any other value - error. + **/ +int +lpfc_hba_down_post(struct lpfc_hba *phba) +{ + return (*phba->lpfc_hba_down_post)(phba); +} /** * lpfc_hb_timeout - The HBA-timer timeout handler @@ -809,7 +907,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) "taking this port offline.\n"); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba); @@ -834,13 +932,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba); lpfc_offline(phba); lpfc_reset_barrier(phba); + spin_lock_irq(&phba->hbalock); lpfc_sli_brdreset(phba); + spin_unlock_irq(&phba->hbalock); lpfc_hba_down_post(phba); lpfc_sli_brdready(phba, HS_MBRDY); lpfc_unblock_mgmt_io(phba); @@ -849,6 +949,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba) } /** + * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention + * @phba: pointer to lpfc hba data structure. 
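lpfc_hba_down_post() is now just a dispatch through a per-device function pointer, so the SLI-3 and SLI-4 paths share one entry point and probe-time code selects the implementation once. A compact, compilable model of that jump-table pattern; all names are illustrative, not the lpfc symbols:

#include <stdio.h>

struct demo_hba;
typedef int (*down_post_fn)(struct demo_hba *);

struct demo_hba {
        int sli_rev;                    /* 3 or 4 */
        down_post_fn hba_down_post;     /* chosen once at probe time */
};

static int down_post_s3(struct demo_hba *hba)
{
        puts("SLI3 ring cleanup");
        return 0;
}

static int down_post_s4(struct demo_hba *hba)
{
        down_post_s3(hba);              /* SLI4 reuses the SLI3 work, then adds more */
        puts("SLI4 sgl/abort list cleanup");
        return 0;
}

static void api_table_setup(struct demo_hba *hba)
{
        hba->hba_down_post = (hba->sli_rev == 4) ? down_post_s4 : down_post_s3;
}

int main(void)
{
        struct demo_hba hba = { 4, 0 };

        api_table_setup(&hba);
        return hba.hba_down_post(&hba); /* mirrors (*phba->lpfc_hba_down_post)(phba) */
}

Note how the SLI-4 handler layers on top of the SLI-3 one, just as lpfc_hba_down_post_s4() begins by calling lpfc_hba_down_post_s3() in the hunk above.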
+ * + * This routine is called to bring a SLI4 HBA offline when HBA hardware error + * other than Port Error 6 has been detected. + **/ +static void +lpfc_sli4_offline_eratt(struct lpfc_hba *phba) +{ + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_sli4_brdreset(phba); + lpfc_hba_down_post(phba); + lpfc_sli4_post_status_check(phba); + lpfc_unblock_mgmt_io(phba); + phba->link_state = LPFC_HBA_ERROR; +} + +/** * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler * @phba: pointer to lpfc hba data structure. * @@ -864,6 +983,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; struct lpfc_sli *psli = &phba->sli; + /* If the pci channel is offline, ignore possible errors, + * since we cannot communicate with the pci card anyway. + */ + if (pci_channel_offline(phba->pcidev)) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); + return; + } + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0479 Deferred Adapter Hardware Error " "Data: x%x x%x x%x\n", @@ -871,7 +1000,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) phba->work_status[0], phba->work_status[1]); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); @@ -909,13 +1038,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) phba->work_hs = old_host_status & ~HS_FFER1; + spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); phba->work_status[1] = readl(phba->MBslimaddr + 0xac); } +static void +lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) +{ + struct lpfc_board_event_header board_event; + struct Scsi_Host *shost; + + board_event.event_type = FC_REG_BOARD_EVENT; + board_event.subcategory = LPFC_EVENT_PORTINTERR; + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(board_event), + (char *) &board_event, + LPFC_NL_VENDOR_ID); +} + /** - * lpfc_handle_eratt - The HBA hardware error handler + * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler * @phba: pointer to lpfc hba data structure. * * This routine is invoked to handle the following HBA hardware error @@ -924,8 +1070,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) * 2 - DMA ring index out of range * 3 - Mailbox command came back as unknown **/ -void -lpfc_handle_eratt(struct lpfc_hba *phba) +static void +lpfc_handle_eratt_s3(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_sli *psli = &phba->sli; @@ -934,24 +1080,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba) unsigned long temperature; struct temp_event temp_event_data; struct Scsi_Host *shost; - struct lpfc_board_event_header board_event; /* If the pci channel is offline, ignore possible errors, - * since we cannot communicate with the pci card anyway. */ - if (pci_channel_offline(phba->pcidev)) + * since we cannot communicate with the pci card anyway. 
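The early exits added for a dead PCI channel still take the hba lock before clearing DEFER_ERATT, because the flag word is shared with interrupt and worker contexts. A simplified userspace model of that rule, with a pthread mutex standing in for spin_lock_irq() on phba->hbalock:

#include <pthread.h>

#define DEMO_DEFER_ERATT 0x1            /* illustrative flag bit */

struct demo_hba {
        pthread_mutex_t lock;           /* stands in for phba->hbalock */
        unsigned int hba_flag;          /* shared with IRQ and worker contexts */
};

static int demo_channel_offline(struct demo_hba *hba)
{
        (void)hba;
        return 1;                       /* stub for pci_channel_offline() */
}

static void demo_handle_deferred_eratt(struct demo_hba *hba)
{
        if (demo_channel_offline(hba)) {
                /* the flag word is shared, so even a bail-out path locks it */
                pthread_mutex_lock(&hba->lock);
                hba->hba_flag &= ~DEMO_DEFER_ERATT;
                pthread_mutex_unlock(&hba->lock);
                return;
        }
        /* ... normal deferred error-attention handling ... */
}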
+ */ + if (pci_channel_offline(phba->pcidev)) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); return; + } + /* If resets are disabled then leave the HBA alone and return */ if (!phba->cfg_enable_hba_reset) return; /* Send an internal error event to mgmt application */ - board_event.event_type = FC_REG_BOARD_EVENT; - board_event.subcategory = LPFC_EVENT_PORTINTERR; - shost = lpfc_shost_from_vport(phba->pport); - fc_host_post_vendor_event(shost, fc_get_event_number(), - sizeof(board_event), - (char *) &board_event, - LPFC_NL_VENDOR_ID); + lpfc_board_errevt_to_mgmt(phba); if (phba->hba_flag & DEFER_ERATT) lpfc_handle_deferred_eratt(phba); @@ -965,7 +1110,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba) phba->work_status[0], phba->work_status[1]); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); /* @@ -1037,6 +1182,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba) } /** + * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to handle the SLI4 HBA hardware error attention + * conditions. + **/ +static void +lpfc_handle_eratt_s4(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + uint32_t event_data; + struct Scsi_Host *shost; + + /* If the pci channel is offline, ignore possible errors, since + * we cannot communicate with the pci card anyway. + */ + if (pci_channel_offline(phba->pcidev)) + return; + /* If resets are disabled then leave the HBA alone and return */ + if (!phba->cfg_enable_hba_reset) + return; + + /* Send an internal error event to mgmt application */ + lpfc_board_errevt_to_mgmt(phba); + + /* For now, the actual action for SLI4 device handling is not + * specified yet, just treated it as adaptor hardware failure + */ + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", + phba->work_status[0], phba->work_status[1]); + + event_data = FC_REG_DUMP_EVENT; + shost = lpfc_shost_from_vport(vport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(event_data), (char *) &event_data, + SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); + + lpfc_sli4_offline_eratt(phba); +} + +/** + * lpfc_handle_eratt - Wrapper func for handling hba error attention + * @phba: pointer to lpfc HBA data structure. + * + * This routine wraps the actual SLI3 or SLI4 hba error attention handling + * routine from the API jump table function pointer from the lpfc_hba struct. + * + * Return codes + * 0 - sucess. + * Any other value - error. + **/ +void +lpfc_handle_eratt(struct lpfc_hba *phba) +{ + (*phba->lpfc_handle_eratt)(phba); +} + +/** * lpfc_handle_latt - The HBA link event handler * @phba: pointer to lpfc hba data structure. 
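Both error handlers now funnel the management notification through one helper, and the SLI-4 handler additionally posts a dump event on the FC transport's vendor-event netlink path. A hedged sketch of such a call, assuming a valid shost and vendor ID; the real event code (FC_REG_DUMP_EVENT) and vendor ID come from lpfc's private headers:

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#define DEMO_DUMP_EVENT 0x10            /* placeholder for FC_REG_DUMP_EVENT */

/* Hand an error indication to user-space management tools via the FC
 * transport's vendor-unique event mechanism. */
static void demo_post_dump_event(struct Scsi_Host *shost, u64 vendor_id)
{
        u32 event_data = DEMO_DUMP_EVENT;

        fc_host_post_vendor_event(shost, fc_get_event_number(),
                                  sizeof(event_data), (char *)&event_data,
                                  vendor_id);
}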
* @@ -1137,7 +1341,7 @@ lpfc_handle_latt_err_exit: * 0 - pointer to the VPD passed in is NULL * 1 - success **/ -static int +int lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) { uint8_t lenlo, lenhi; @@ -1292,6 +1496,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) uint16_t dev_id = phba->pcidev->device; int max_speed; int GE = 0; + int oneConnect = 0; /* default is not a oneConnect */ struct { char * name; int max_speed; @@ -1437,6 +1642,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) case PCI_DEVICE_ID_PROTEUS_S: m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; break; + case PCI_DEVICE_ID_TIGERSHARK: + oneConnect = 1; + m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; + break; + case PCI_DEVICE_ID_TIGERSHARK_S: + oneConnect = 1; + m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"}; + break; default: m = (typeof(m)){ NULL }; break; @@ -1444,13 +1657,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) if (mdp && mdp[0] == '\0') snprintf(mdp, 79,"%s", m.name); - if (descp && descp[0] == '\0') - snprintf(descp, 255, - "Emulex %s %d%s %s %s", - m.name, m.max_speed, - (GE) ? "GE" : "Gb", - m.bus, - (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); + /* oneConnect hba requires special processing, they are all initiators + * and we put the port number on the end + */ + if (descp && descp[0] == '\0') { + if (oneConnect) + snprintf(descp, 255, + "Emulex OneConnect %s, FCoE Initiator, Port %s", + m.name, + phba->Port); + else + snprintf(descp, 255, + "Emulex %s %d%s %s %s", + m.name, m.max_speed, + (GE) ? "GE" : "Gb", + m.bus, + (GE) ? "FCoE Adapter" : + "Fibre Channel Adapter"); + } } /** @@ -1533,7 +1757,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; icmd->ulpLe = 1; - if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == + IOCB_ERROR) { lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); cnt++; @@ -1761,7 +1986,6 @@ lpfc_cleanup(struct lpfc_vport *vport) * Lets wait for this to happen, if needed. */ while (!list_empty(&vport->fc_nodes)) { - if (i++ > 3000) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0233 Nodelist not empty\n"); @@ -1782,7 +2006,6 @@ lpfc_cleanup(struct lpfc_vport *vport) /* Wait for any activity on ndlps to settle */ msleep(10); } - return; } /** @@ -1803,22 +2026,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) } /** - * lpfc_stop_phba_timers - Stop all the timers associated with an HBA + * lpfc_stop_hba_timers - Stop all the timers associated with an HBA * @phba: pointer to lpfc hba data structure. * * This routine stops all the timers associated with a HBA. This function is * invoked before either putting a HBA offline or unloading the driver. 
**/ -static void -lpfc_stop_phba_timers(struct lpfc_hba *phba) +void +lpfc_stop_hba_timers(struct lpfc_hba *phba) { - del_timer_sync(&phba->fcp_poll_timer); lpfc_stop_vport_timers(phba->pport); del_timer_sync(&phba->sli.mbox_tmo); del_timer_sync(&phba->fabric_block_timer); - phba->hb_outstanding = 0; - del_timer_sync(&phba->hb_tmofunc); del_timer_sync(&phba->eratt_poll); + del_timer_sync(&phba->hb_tmofunc); + phba->hb_outstanding = 0; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + /* Stop any LightPulse device specific driver timers */ + del_timer_sync(&phba->fcp_poll_timer); + break; + case LPFC_PCI_DEV_OC: + /* Stop any OneConnect device sepcific driver timers */ + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0297 Invalid device group (x%x)\n", + phba->pci_dev_grp); + break; + } return; } @@ -1878,14 +2115,21 @@ lpfc_online(struct lpfc_hba *phba) return 1; } - if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ - lpfc_unblock_mgmt_io(phba); - return 1; + if (phba->sli_rev == LPFC_SLI_REV4) { + if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; + } + } else { + if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; + } } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); @@ -1947,11 +2191,12 @@ lpfc_offline_prep(struct lpfc_hba * phba) /* Issue an unreg_login to all nodes on all vports */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { struct Scsi_Host *shost; if (vports[i]->load_flag & FC_UNLOADING) continue; + vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; shost = lpfc_shost_from_vport(vports[i]); list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, @@ -1975,7 +2220,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) } lpfc_destroy_vport_work_array(phba, vports); - lpfc_sli_flush_mbox_queue(phba); + lpfc_sli_mbox_sys_shutdown(phba); } /** @@ -1996,11 +2241,11 @@ lpfc_offline(struct lpfc_hba *phba) if (phba->pport->fc_flag & FC_OFFLINE_MODE) return; - /* stop all timers associated with this hba */ - lpfc_stop_phba_timers(phba); + /* stop port and all timers associated with this hba */ + lpfc_stop_port(phba); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_stop_vport_timers(vports[i]); lpfc_destroy_vport_work_array(phba, vports); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -2013,7 +2258,7 @@ lpfc_offline(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->work_port_events = 0; @@ -2106,6 +2351,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; + if (phba->sli_rev == LPFC_SLI_REV4) { + shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; + 
shost->sg_tablesize = phba->cfg_sg_seg_cnt; + } /* * Set initial can_queue value since 0 is no longer supported and @@ -2123,6 +2372,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); + INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); init_timer(&vport->fc_disctmo); @@ -2314,15 +2564,3461 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) } /** - * lpfc_enable_msix - Enable MSI-X interrupt mode + * lpfc_stop_port_s3 - Stop SLI3 device port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to stop an SLI3 device port, it stops the device + * from generating interrupts and stops the device driver's timers for the + * device. + **/ +static void +lpfc_stop_port_s3(struct lpfc_hba *phba) +{ + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + /* Clear all pending interrupts */ + writel(0xffffffff, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ + + /* Reset some HBA SLI setup states */ + lpfc_stop_hba_timers(phba); + phba->pport->work_port_events = 0; +} + +/** + * lpfc_stop_port_s4 - Stop SLI4 device port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to stop an SLI4 device port, it stops the device + * from generating interrupts and stops the device driver's timers for the + * device. + **/ +static void +lpfc_stop_port_s4(struct lpfc_hba *phba) +{ + /* Reset some HBA SLI4 setup states */ + lpfc_stop_hba_timers(phba); + phba->pport->work_port_events = 0; + phba->sli4_hba.intr_enable = 0; + /* Hard clear it for now, shall have more graceful way to wait later */ + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; +} + +/** + * lpfc_stop_port - Wrapper function for stopping hba port + * @phba: Pointer to HBA context object. + * + * This routine wraps the actual SLI3 or SLI4 hba stop port routine from + * the API jump table function pointer from the lpfc_hba struct. + **/ +void +lpfc_stop_port(struct lpfc_hba *phba) +{ + phba->lpfc_stop_port(phba); +} + +/** + * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to remove the driver default fcf record from + * the port. This routine currently acts on FCF Index 0. + * + **/ +void +lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) +{ + int rc = 0; + LPFC_MBOXQ_t *mboxq; + struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; + uint32_t mbox_tmo, req_len; + uint32_t shdr_status, shdr_add_status; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2020 Failed to allocate mbox for ADD_FCF cmd\n"); + return; + } + + req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - + sizeof(struct lpfc_sli4_cfg_mhdr); + rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, + req_len, LPFC_SLI4_MBX_EMBED); + /* + * In phase 1, there is a single FCF index, 0. In phase2, the driver + * supports multiple FCF indices. 
+ */ + del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; + bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); + bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, + phba->fcf.fcf_indx); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + } + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr_status = bf_get(lpfc_mbox_hdr_status, + &del_fcf_record->header.cfg_shdr.response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &del_fcf_record->header.cfg_shdr.response); + if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2516 DEL FCF of default FCF Index failed " + "mbx status x%x, status x%x add_status x%x\n", + rc, shdr_status, shdr_add_status); + } + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); +} + +/** + * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to parse the SLI4 link-attention link fault code and + * translate it into the base driver's read link attention mailbox command + * status. + * + * Return: Link-attention status in terms of base driver's coding. + **/ +static uint16_t +lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + uint16_t latt_fault; + + switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { + case LPFC_ASYNC_LINK_FAULT_NONE: + case LPFC_ASYNC_LINK_FAULT_LOCAL: + case LPFC_ASYNC_LINK_FAULT_REMOTE: + latt_fault = 0; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0398 Invalid link fault code: x%x\n", + bf_get(lpfc_acqe_link_fault, acqe_link)); + latt_fault = MBXERR_ERROR; + break; + } + return latt_fault; +} + +/** + * lpfc_sli4_parse_latt_type - Parse sli4 link attention type + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to parse the SLI4 link attention type and translate it + * into the base driver's link attention type coding. + * + * Return: Link attention type in terms of base driver's coding. + **/ +static uint8_t +lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + uint8_t att_type; + + switch (bf_get(lpfc_acqe_link_status, acqe_link)) { + case LPFC_ASYNC_LINK_STATUS_DOWN: + case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: + att_type = AT_LINK_DOWN; + break; + case LPFC_ASYNC_LINK_STATUS_UP: + /* Ignore physical link up events - wait for logical link up */ + att_type = AT_RESERVED; + break; + case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: + att_type = AT_LINK_UP; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0399 Invalid link attention type: x%x\n", + bf_get(lpfc_acqe_link_status, acqe_link)); + att_type = AT_RESERVED; + break; + } + return att_type; +} + +/** + * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to parse the SLI4 link-attention link speed and translate + * it into the base driver's link-attention link speed coding. + * + * Return: Link-attention link speed in terms of base driver's coding. 
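Throughout the SLI-4 code, hardware words are read and written through bf_get()/bf_set() style accessors rather than C bit-fields, which keeps the field layout explicit and independent of compiler bit-field packing. A self-contained model of such shift-and-mask accessors; the field placement here is invented for illustration, and the real lpfc macros take their shift and mask from per-field definitions in the hardware headers:

#include <stdint.h>
#include <stdio.h>

#define demo_field_SHIFT 8
#define demo_field_MASK  0x000000FF

#define bf_demo_get(word)      (((word) >> demo_field_SHIFT) & demo_field_MASK)
#define bf_demo_set(word, val) \
        (((word) & ~(demo_field_MASK << demo_field_SHIFT)) | \
         (((val) & demo_field_MASK) << demo_field_SHIFT))

int main(void)
{
        uint32_t w = 0;

        w = bf_demo_set(w, 0x3d);
        printf("field = 0x%x\n", bf_demo_get(w));       /* prints 0x3d */
        return 0;
}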
+ **/ +static uint8_t +lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + uint8_t link_speed; + + switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { + case LPFC_ASYNC_LINK_SPEED_ZERO: + link_speed = LA_UNKNW_LINK; + break; + case LPFC_ASYNC_LINK_SPEED_10MBPS: + link_speed = LA_UNKNW_LINK; + break; + case LPFC_ASYNC_LINK_SPEED_100MBPS: + link_speed = LA_UNKNW_LINK; + break; + case LPFC_ASYNC_LINK_SPEED_1GBPS: + link_speed = LA_1GHZ_LINK; + break; + case LPFC_ASYNC_LINK_SPEED_10GBPS: + link_speed = LA_10GHZ_LINK; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0483 Invalid link-attention link speed: x%x\n", + bf_get(lpfc_acqe_link_speed, acqe_link)); + link_speed = LA_UNKNW_LINK; + break; + } + return link_speed; +} + +/** + * lpfc_sli4_async_link_evt - Process the asynchronous link event + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async link completion queue entry. + * + * This routine is to handle the SLI4 asynchronous link event. + **/ +static void +lpfc_sli4_async_link_evt(struct lpfc_hba *phba, + struct lpfc_acqe_link *acqe_link) +{ + struct lpfc_dmabuf *mp; + LPFC_MBOXQ_t *pmb; + MAILBOX_t *mb; + READ_LA_VAR *la; + uint8_t att_type; + + att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); + if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) + return; + pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0395 The mboxq allocation failed\n"); + return; + } + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!mp) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0396 The lpfc_dmabuf allocation failed\n"); + goto out_free_pmb; + } + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + if (!mp->virt) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0397 The mbuf allocation failed\n"); + goto out_free_dmabuf; + } + + /* Cleanup any outstanding ELS commands */ + lpfc_els_flush_all_cmd(phba); + + /* Block ELS IOCBs until we have done process link event */ + phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; + + /* Update link event statistics */ + phba->sli.slistat.link_event++; + + /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ + lpfc_read_la(phba, pmb, mp); + pmb->vport = phba->pport; + + /* Parse and translate status field */ + mb = &pmb->u.mb; + mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); + + /* Parse and translate link attention fields */ + la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; + la->eventTag = acqe_link->event_tag; + la->attType = att_type; + la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); + + /* Fake the the following irrelvant fields */ + la->topology = TOPOLOGY_PT_PT; + la->granted_AL_PA = 0; + la->il = 0; + la->pb = 0; + la->fa = 0; + la->mm = 0; + + /* Keep the link status for extra SLI4 state machine reference */ + phba->sli4_hba.link_state.speed = + bf_get(lpfc_acqe_link_speed, acqe_link); + phba->sli4_hba.link_state.duplex = + bf_get(lpfc_acqe_link_duplex, acqe_link); + phba->sli4_hba.link_state.status = + bf_get(lpfc_acqe_link_status, acqe_link); + phba->sli4_hba.link_state.physical = + bf_get(lpfc_acqe_link_physical, acqe_link); + phba->sli4_hba.link_state.fault = + bf_get(lpfc_acqe_link_fault, acqe_link); + + /* Invoke the lpfc_handle_latt mailbox command callback function */ + lpfc_mbx_cmpl_read_la(phba, pmb); + + return; + +out_free_dmabuf: + kfree(mp); +out_free_pmb: + mempool_free(pmb, phba->mbox_mem_pool); +} + +/** 
+ * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async fcoe completion queue entry. + * + * This routine is to handle the SLI4 asynchronous fcoe event. + **/ +static void +lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, + struct lpfc_acqe_fcoe *acqe_fcoe) +{ + uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); + int rc; + + switch (event_type) { + case LPFC_FCOE_EVENT_TYPE_NEW_FCF: + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "2546 New FCF found index 0x%x tag 0x%x \n", + acqe_fcoe->fcf_index, + acqe_fcoe->event_tag); + /* + * If the current FCF is in discovered state, + * do nothing. + */ + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DISCOVERED) { + spin_unlock_irq(&phba->hbalock); + break; + } + spin_unlock_irq(&phba->hbalock); + + /* Read the FCF table and re-discover SAN. */ + rc = lpfc_sli4_read_fcf_record(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "2547 Read FCF record failed 0x%x\n", + rc); + break; + + case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2548 FCF Table full count 0x%x tag 0x%x \n", + bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), + acqe_fcoe->event_tag); + break; + + case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "2549 FCF disconnected fron network index 0x%x" + " tag 0x%x \n", acqe_fcoe->fcf_index, + acqe_fcoe->event_tag); + /* If the event is not for currently used fcf do nothing */ + if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) + break; + /* + * Currently, driver support only one FCF - so treat this as + * a link down. + */ + lpfc_linkdown(phba); + /* Unregister FCF if no devices connected to it */ + lpfc_unregister_unused_fcf(phba); + break; + + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0288 Unknown FCoE event type 0x%x event tag " + "0x%x\n", event_type, acqe_fcoe->event_tag); + break; + } +} + +/** + * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async dcbx completion queue entry. + * + * This routine is to handle the SLI4 asynchronous dcbx event. + **/ +static void +lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, + struct lpfc_acqe_dcbx *acqe_dcbx) +{ + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0290 The SLI4 DCBX asynchronous event is not " + "handled yet\n"); +} + +/** + * lpfc_sli4_async_event_proc - Process all the pending asynchronous event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 asynchronous events. 
+ **/ +void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + + /* First, declare the async event has been handled */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~ASYNC_EVENT; + spin_unlock_irq(&phba->hbalock); + /* Now, handle all the async events */ + while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { + /* Get the first event from the head of the event queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + /* Process the asynchronous event */ + switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { + case LPFC_TRAILER_CODE_LINK: + lpfc_sli4_async_link_evt(phba, + &cq_event->cqe.acqe_link); + break; + case LPFC_TRAILER_CODE_FCOE: + lpfc_sli4_async_fcoe_evt(phba, + &cq_event->cqe.acqe_fcoe); + break; + case LPFC_TRAILER_CODE_DCBX: + lpfc_sli4_async_dcbx_evt(phba, + &cq_event->cqe.acqe_dcbx); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "1804 Invalid asynchrous event code: " + "x%x\n", bf_get(lpfc_trailer_code, + &cq_event->cqe.mcqe_cmpl)); + break; + } + /* Free the completion event processed to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); + } +} + +/** + * lpfc_api_table_setup - Set up per hba pci-device group func api jump table + * @phba: pointer to lpfc hba data structure. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine is invoked to set up the per HBA PCI-Device group function + * API jump table entries. + * + * Return: 0 if success, otherwise -ENODEV + **/ +int +lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + int rc; + + /* Set up lpfc PCI-device group */ + phba->pci_dev_grp = dev_grp; + + /* The LPFC_PCI_DEV_OC uses SLI4 */ + if (dev_grp == LPFC_PCI_DEV_OC) + phba->sli_rev = LPFC_SLI_REV4; + + /* Set up device INIT API function jump table */ + rc = lpfc_init_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up SCSI API function jump table */ + rc = lpfc_scsi_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up SLI API function jump table */ + rc = lpfc_sli_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + /* Set up MBOX API function jump table */ + rc = lpfc_mbox_api_table_setup(phba, dev_grp); + if (rc) + return -ENODEV; + + return 0; +} + +/** + * lpfc_log_intr_mode - Log the active interrupt mode + * @phba: pointer to lpfc hba data structure. + * @intr_mode: active interrupt mode adopted. + * + * This routine it invoked to log the currently used active interrupt mode + * to the device. + **/ +static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) +{ + switch (intr_mode) { + case 0: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0470 Enable INTx interrupt mode.\n"); + break; + case 1: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0481 Enabled MSI interrupt mode.\n"); + break; + case 2: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0480 Enabled MSI-X interrupt mode.\n"); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0482 Illegal interrupt mode.\n"); + break; + } + return; +} + +/** + * lpfc_enable_pci_dev - Enable a generic PCI device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the PCI device that is common to all + * PCI devices. 
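lpfc_sli4_async_event_proc() drains a lock-protected work list by detaching one entry under the lock and dispatching on its trailer code outside it, so the lock is never held across event processing. A small userspace model of that consumer loop, with a pthread mutex in place of the hba spinlock:

#include <pthread.h>
#include <stdio.h>

struct demo_event {
        struct demo_event *next;
        int trailer_code;
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_event *demo_queue;

static void demo_drain_events(void)
{
        struct demo_event *ev;

        for (;;) {
                pthread_mutex_lock(&demo_lock);
                ev = demo_queue;                /* detach the head entry */
                if (ev)
                        demo_queue = ev->next;
                pthread_mutex_unlock(&demo_lock);
                if (!ev)
                        break;
                switch (ev->trailer_code) {     /* dispatch outside the lock */
                case 1:
                        puts("link event");
                        break;
                case 2:
                        puts("fcoe event");
                        break;
                default:
                        puts("unknown event");
                        break;
                }
        }
}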
+ * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_enable_pci_dev(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + int bars; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + goto out_error; + else + pdev = phba->pcidev; + /* Select PCI BARs */ + bars = pci_select_bars(pdev, IORESOURCE_MEM); + /* Enable PCI device */ + if (pci_enable_device_mem(pdev)) + goto out_error; + /* Request PCI resource for the device */ + if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) + goto out_disable_device; + /* Set up device as PCI master and save state for EEH */ + pci_set_master(pdev); + pci_try_set_mwi(pdev); + pci_save_state(pdev); + + return 0; + +out_disable_device: + pci_disable_device(pdev); +out_error: + return -ENODEV; +} + +/** + * lpfc_disable_pci_dev - Disable a generic PCI device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to disable the PCI device that is common to all + * PCI devices. + **/ +static void +lpfc_disable_pci_dev(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + int bars; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return; + else + pdev = phba->pcidev; + /* Select PCI BARs */ + bars = pci_select_bars(pdev, IORESOURCE_MEM); + /* Release PCI resource and disable PCI device */ + pci_release_selected_regions(pdev, bars); + pci_disable_device(pdev); + /* Null out PCI private reference to driver */ + pci_set_drvdata(pdev, NULL); + + return; +} + +/** + * lpfc_reset_hba - Reset a hba + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to reset a hba device. It brings the HBA + * offline, performs a board restart, and then brings the board back + * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up + * on outstanding mailbox commands. + **/ +void +lpfc_reset_hba(struct lpfc_hba *phba) +{ + /* If resets are disabled then set error state and return. */ + if (!phba->cfg_enable_hba_reset) { + phba->link_state = LPFC_HBA_ERROR; + return; + } + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + lpfc_online(phba); + lpfc_unblock_mgmt_io(phba); +} + +/** + * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources specific to + * support the SLI-3 HBA device it attached to. 
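lpfc_enable_pci_dev() follows the usual goto-unwind shape: each acquisition that fails jumps to a label that releases only what already succeeded, in reverse order. A minimal stand-alone illustration of the idiom:

#include <stdlib.h>

/* On success the resources are handed back to the caller; on any failure
 * only the steps that completed are undone, in reverse order. */
static int demo_setup(char **pa, char **pb)
{
        char *a, *b;

        a = malloc(64);
        if (!a)
                goto out_error;
        b = malloc(64);
        if (!b)
                goto out_free_a;
        *pa = a;
        *pb = b;
        return 0;

out_free_a:
        free(a);
out_error:
        return -1;
}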
+ * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + + /* + * Initialize timers used by driver + */ + + /* Heartbeat timer */ + init_timer(&phba->hb_tmofunc); + phba->hb_tmofunc.function = lpfc_hb_timeout; + phba->hb_tmofunc.data = (unsigned long)phba; + + psli = &phba->sli; + /* MBOX heartbeat timer */ + init_timer(&psli->mbox_tmo); + psli->mbox_tmo.function = lpfc_mbox_timeout; + psli->mbox_tmo.data = (unsigned long) phba; + /* FCP polling mode timer */ + init_timer(&phba->fcp_poll_timer); + phba->fcp_poll_timer.function = lpfc_poll_timeout; + phba->fcp_poll_timer.data = (unsigned long) phba; + /* Fabric block timer */ + init_timer(&phba->fabric_block_timer); + phba->fabric_block_timer.function = lpfc_fabric_block_timeout; + phba->fabric_block_timer.data = (unsigned long) phba; + /* EA polling mode timer */ + init_timer(&phba->eratt_poll); + phba->eratt_poll.function = lpfc_poll_eratt; + phba->eratt_poll.data = (unsigned long) phba; + + /* Host attention work mask setup */ + phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); + phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); + + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); + /* + * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be dynamically calculated. + * 2 segments are added since the IOCB needs a command and response bde. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + + if (phba->cfg_enable_bg) { + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; + phba->cfg_sg_dma_buf_size += + phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); + } + + /* Also reinitialize the host templates with new values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + phba->max_vpi = LPFC_MAX_VPI; + /* This will be set to correct value after config_port mbox */ + phba->max_vports = 0; + + /* + * Initialize the SLI Layer to run with lpfc HBAs. + */ + lpfc_sli_setup(phba); + lpfc_sli_queue_setup(phba); + + /* Allocate device driver memory */ + if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) + return -ENOMEM; + + return 0; +} + +/** + * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up + * specific for supporting the SLI-3 HBA device it attached to. + **/ +static void +lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) +{ + /* Free device driver memory allocated */ + lpfc_mem_free_all(phba); + + return; +} + +/** + * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources specific to + * support the SLI-4 HBA device it attached to. 
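The SLI-3 sizing comment above can be made concrete: each pool buffer holds one FCP command, one FCP response, and (sg_seg_cnt + 2) 12-byte BDEs, where the extra two cover the command and response BDEs. A worked check, reusing the 32- and 160-byte cmd/rsp figures quoted in the SLI-4 table later in this patch; the constants are placeholders, not the lpfc struct sizes:

#include <stdio.h>

#define DEMO_FCP_CMND_SZ 32             /* assumed command size, per the table */
#define DEMO_FCP_RSP_SZ 160             /* assumed response size, per the table */
#define DEMO_BDE64_SZ 12                /* a 3-word SLI-3 BDE */

static unsigned int demo_sg_dma_buf_size(unsigned int sg_seg_cnt)
{
        /* +2: one BDE each for the command and the response */
        return DEMO_FCP_CMND_SZ + DEMO_FCP_RSP_SZ +
               (sg_seg_cnt + 2) * DEMO_BDE64_SZ;
}

int main(void)
{
        /* 64 data segments -> 32 + 160 + 66 * 12 = 984 bytes per buffer */
        printf("%u\n", demo_sg_dma_buf_size(64));
        return 0;
}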
+ * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli; + int rc; + int i, hbq_count; + + /* Before proceed, wait for POST done and device ready */ + rc = lpfc_sli4_post_status_check(phba); + if (rc) + return -ENODEV; + + /* + * Initialize timers used by driver + */ + + /* Heartbeat timer */ + init_timer(&phba->hb_tmofunc); + phba->hb_tmofunc.function = lpfc_hb_timeout; + phba->hb_tmofunc.data = (unsigned long)phba; + + psli = &phba->sli; + /* MBOX heartbeat timer */ + init_timer(&psli->mbox_tmo); + psli->mbox_tmo.function = lpfc_mbox_timeout; + psli->mbox_tmo.data = (unsigned long) phba; + /* Fabric block timer */ + init_timer(&phba->fabric_block_timer); + phba->fabric_block_timer.function = lpfc_fabric_block_timeout; + phba->fabric_block_timer.data = (unsigned long) phba; + /* EA polling mode timer */ + init_timer(&phba->eratt_poll); + phba->eratt_poll.function = lpfc_poll_eratt; + phba->eratt_poll.data = (unsigned long) phba; + /* + * We need to do a READ_CONFIG mailbox command here before + * calling lpfc_get_cfgparam. For VFs this will report the + * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. + * All of the resources allocated + * for this Port are tied to these values. + */ + /* Get all the module params for configuring this host */ + lpfc_get_cfgparam(phba); + phba->max_vpi = LPFC_MAX_VPI; + /* This will be set to correct value after the read_config mbox */ + phba->max_vports = 0; + + /* Program the default value of vlan_id and fc_map */ + phba->valid_vlan = 0; + phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; + phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; + phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; + + /* + * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be dynamically calculated. + * 2 segments are added since the IOCB needs a command and response bde. + * To insure that the scsi sgl does not cross a 4k page boundary only + * sgl sizes of 1k, 2k, 4k, and 8k are supported. + * Table of sgl sizes and seg_cnt: + * sgl size, sg_seg_cnt total seg + * 1k 50 52 + * 2k 114 116 + * 4k 242 244 + * 8k 498 500 + * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024 + * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048 + * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096 + * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192 + */ + if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) + phba->cfg_sg_seg_cnt = 50; + else if (phba->cfg_sg_seg_cnt <= 114) + phba->cfg_sg_seg_cnt = 114; + else if (phba->cfg_sg_seg_cnt <= 242) + phba->cfg_sg_seg_cnt = 242; + else + phba->cfg_sg_seg_cnt = 498; + + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); + phba->cfg_sg_dma_buf_size += + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + + /* Initialize buffer queue management fields */ + hbq_count = lpfc_sli_hbq_count(); + for (i = 0; i < hbq_count; ++i) + INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); + INIT_LIST_HEAD(&phba->rb_pend_list); + phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; + phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; + + /* + * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
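The bucket table above keeps each pool buffer at a power-of-two size so a scsi sgl never straddles a 4K page: cmd(32) + rsp(160) + (seg_cnt + 2) 16-byte SGEs lands exactly on 1K/2K/4K/8K. A sketch of that snapping, mirroring the if/else ladder in the hunk (the first cutoff there is LPFC_DEFAULT_SG_SEG_CNT; a plain 50 is used here):

#include <stdio.h>

/* Snap the configured count to one of four buckets whose buffer
 * (32 + 160 + (n + 2) * 16) is exactly 1K/2K/4K/8K. */
static int demo_clamp_sg_seg_cnt(int requested)
{
        if (requested <= 50)
                return 50;              /* 32 + 160 + 52 * 16 = 1024 */
        if (requested <= 114)
                return 114;             /* 32 + 160 + 116 * 16 = 2048 */
        if (requested <= 242)
                return 242;             /* 32 + 160 + 244 * 16 = 4096 */
        return 498;                     /* 32 + 160 + 500 * 16 = 8192 */
}

int main(void)
{
        int n = demo_clamp_sg_seg_cnt(200);

        printf("seg_cnt=%d buf=%d\n", n, 32 + 160 + (n + 2) * 16);
        return 0;
}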
+ */ + /* Initialize the Abort scsi buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + /* This abort list is used by the worker thread */ + spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); + + /* + * Initialize driver internal slow-path work queues + */ + + /* Driver internal slow-path CQ Event pool */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); + /* Response IOCB work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); + /* Asynchronous event CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); + /* Fast-path XRI aborted CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); + /* Slow-path XRI aborted CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); + /* Receive queue CQ Event work queue list */ + INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); + + /* Initialize the driver internal SLI layer lists. */ + lpfc_sli_setup(phba); + lpfc_sli_queue_setup(phba); + + /* Allocate device driver memory */ + rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); + if (rc) + return -ENOMEM; + + /* Create the bootstrap mailbox command */ + rc = lpfc_create_bootstrap_mbox(phba); + if (unlikely(rc)) + goto out_free_mem; + + /* Set up the host's endian order with the device. */ + rc = lpfc_setup_endian_order(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + /* Set up the hba's configuration parameters. */ + rc = lpfc_sli4_read_config(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + /* Perform a function reset */ + rc = lpfc_pci_function_reset(phba); + if (unlikely(rc)) + goto out_free_bsmbx; + + /* Create all the SLI4 queues */ + rc = lpfc_sli4_queue_create(phba); + if (rc) + goto out_free_bsmbx; + + /* Create driver internal CQE event pool */ + rc = lpfc_sli4_cq_event_pool_create(phba); + if (rc) + goto out_destroy_queue; + + /* Initialize and populate the sgl list per host */ + rc = lpfc_init_sgl_list(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1400 Failed to initialize sgl list.\n"); + goto out_destroy_cq_event_pool; + } + rc = lpfc_init_active_sgl_array(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1430 Failed to initialize active sgl array.\n"); + goto out_free_sgl_list; + } + + rc = lpfc_sli4_init_rpi_hdrs(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1432 Failed to initialize rpi headers.\n"); + goto out_free_active_sgl; + } + + phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fcp_eq_hdl) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2572 Failed to allocate memory for fast-path " + "per-EQ handle array\n"); + goto out_remove_rpi_hdrs; + } + + phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * + phba->sli4_hba.cfg_eqn), GFP_KERNEL); + if (!phba->sli4_hba.msix_entries) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2573 Failed to allocate memory for msi-x " + "interrupt vector entries\n"); + goto out_free_fcp_eq_hdl; + } + + return rc; + +out_free_fcp_eq_hdl: + kfree(phba->sli4_hba.fcp_eq_hdl); +out_remove_rpi_hdrs: + lpfc_sli4_remove_rpi_hdrs(phba); +out_free_active_sgl: + lpfc_free_active_sgl(phba); +out_free_sgl_list: + lpfc_free_sgl_list(phba); +out_destroy_cq_event_pool: + lpfc_sli4_cq_event_pool_destroy(phba); +out_destroy_queue: + lpfc_sli4_queue_destroy(phba); +out_free_bsmbx: +
lpfc_destroy_bootstrap_mbox(phba); +out_free_mem: + lpfc_mem_free(phba); + return rc; +} + +/** + * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up + * specifically for supporting the SLI-4 HBA device it is attached to. + **/ +static void +lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) +{ + struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + + /* unregister default FCFI from the HBA */ + lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); + + /* Free the default FCF table */ + lpfc_sli_remove_dflt_fcf(phba); + + /* Free memory allocated for msi-x interrupt vector entries */ + kfree(phba->sli4_hba.msix_entries); + + /* Free memory allocated for fast-path work queue handles */ + kfree(phba->sli4_hba.fcp_eq_hdl); + + /* Free the allocated rpi headers. */ + lpfc_sli4_remove_rpi_hdrs(phba); + + /* Free the ELS sgl list */ + lpfc_free_active_sgl(phba); + lpfc_free_sgl_list(phba); + + /* Free the SCSI sgl management array */ + kfree(phba->sli4_hba.lpfc_scsi_psb_array); + + /* Free the SLI4 queues */ + lpfc_sli4_queue_destroy(phba); + + /* Free the completion queue EQ event pool */ + lpfc_sli4_cq_event_release_all(phba); + lpfc_sli4_cq_event_pool_destroy(phba); + + /* Reset SLI4 HBA FCoE function */ + lpfc_pci_function_reset(phba); + + /* Free the bsmbx region. */ + lpfc_destroy_bootstrap_mbox(phba); + + /* Free the SLI Layer memory with SLI4 HBAs */ + lpfc_mem_free_all(phba); + + /* Free the current connect table */ + list_for_each_entry_safe(conn_entry, next_conn_entry, + &phba->fcf_conn_rec_list, list) + kfree(conn_entry); + + return; +} + +/** + * lpfc_init_api_table_setup - Set up init api function jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the device INIT interface API function jump table + * in @phba struct. + * + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; + phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; + phba->lpfc_stop_port = lpfc_stop_port_s3; + break; + case LPFC_PCI_DEV_OC: + phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; + phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; + phba->lpfc_stop_port = lpfc_stop_port_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1431 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + break; + } + return 0; +} + +/** + * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources before the + * device specific resource setup to support the HBA device it is attached to.
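The connect-table teardown in lpfc_sli4_driver_resource_unset() relies on the _safe variant of the list walk: the successor pointer is captured before the current entry is freed. The same idiom in a minimal userspace form:

#include <stdlib.h>

struct conn_rec {
        struct conn_rec *next;
};

static void demo_free_conn_list(struct conn_rec *head)
{
        struct conn_rec *cur, *next;

        for (cur = head; cur; cur = next) {
                next = cur->next;       /* remember the successor first */
                free(cur);              /* now the current node may go away */
        }
}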
+ * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) +{ + /* + * Driver resources common to all SLI revisions + */ + atomic_set(&phba->fast_event_count, 0); + spin_lock_init(&phba->hbalock); + + /* Initialize ndlp management spinlock */ + spin_lock_init(&phba->ndlp_lock); + + INIT_LIST_HEAD(&phba->port_list); + INIT_LIST_HEAD(&phba->work_list); + init_waitqueue_head(&phba->wait_4_mlo_m_q); + + /* Initialize the wait queue head for the kernel thread */ + init_waitqueue_head(&phba->work_waitq); + + /* Initialize the scsi buffer list used by driver for scsi IO */ + spin_lock_init(&phba->scsi_buf_list_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + + /* Initialize the fabric iocb list */ + INIT_LIST_HEAD(&phba->fabric_iocb_list); + + /* Initialize list to save ELS buffers */ + INIT_LIST_HEAD(&phba->elsbuf); + + /* Initialize FCF connection rec list */ + INIT_LIST_HEAD(&phba->fcf_conn_rec_list); + + return 0; +} + +/** + * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the driver internal resources after the + * device specific resource setup to support the HBA device it attached to. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) +{ + int error; + + /* Startup the kernel thread for this host adapter. */ + phba->worker_thread = kthread_run(lpfc_do_work, phba, + "lpfc_worker_%d", phba->brd_no); + if (IS_ERR(phba->worker_thread)) { + error = PTR_ERR(phba->worker_thread); + return error; + } + + return 0; +} + +/** + * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the driver internal resources set up after + * the device specific resource setup for supporting the HBA device it + * attached to. + **/ +static void +lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) +{ + /* Stop kernel worker thread */ + kthread_stop(phba->worker_thread); +} + +/** + * lpfc_free_iocb_list - Free iocb list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's IOCB list and memory. + **/ +static void +lpfc_free_iocb_list(struct lpfc_hba *phba) +{ + struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; + + spin_lock_irq(&phba->hbalock); + list_for_each_entry_safe(iocbq_entry, iocbq_next, + &phba->lpfc_iocb_list, list) { + list_del(&iocbq_entry->list); + kfree(iocbq_entry); + phba->total_iocbq_bufs--; + } + spin_unlock_irq(&phba->hbalock); + + return; +} + +/** + * lpfc_init_iocb_list - Allocate and initialize iocb list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate and initizlize the driver's IOCB + * list and set up the IOCB tag array accordingly. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) +{ + struct lpfc_iocbq *iocbq_entry = NULL; + uint16_t iotag; + int i; + + /* Initialize and populate the iocb list per host. */ + INIT_LIST_HEAD(&phba->lpfc_iocb_list); + for (i = 0; i < iocb_count; i++) { + iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); + if (iocbq_entry == NULL) { + printk(KERN_ERR "%s: only allocated %d iocbs of " + "expected %d count. 
Unloading driver.\n", + __func__, i, LPFC_IOCB_LIST_CNT); + goto out_free_iocbq; + } + + iotag = lpfc_sli_next_iotag(phba, iocbq_entry); + if (iotag == 0) { + kfree(iocbq_entry); + printk(KERN_ERR "%s: failed to allocate IOTAG. " + "Unloading driver.\n", __func__); + goto out_free_iocbq; + } + iocbq_entry->sli4_xritag = NO_XRI; + + spin_lock_irq(&phba->hbalock); + list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); + phba->total_iocbq_bufs++; + spin_unlock_irq(&phba->hbalock); + } + + return 0; + +out_free_iocbq: + lpfc_free_iocb_list(phba); + + return -ENOMEM; +} + +/** + * lpfc_free_sgl_list - Free sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver's sgl list and memory. + **/ +static void +lpfc_free_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + LIST_HEAD(sglq_list); + int rc = 0; + + spin_lock_irq(&phba->hbalock); + list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(sglq_entry, sglq_next, + &sglq_list, list) { + list_del(&sglq_entry->list); + lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); + kfree(sglq_entry); + phba->sli4_hba.total_sglq_bufs--; + } + rc = lpfc_sli4_remove_all_sgl_pages(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2005 Unable to deregister pages from HBA: %x", rc); + } + kfree(phba->sli4_hba.lpfc_els_sgl_array); +} + +/** + * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate the driver's active sgl memory. + * This array will hold the sglq_entry's for active IOs. + **/ +static int +lpfc_init_active_sgl_array(struct lpfc_hba *phba) +{ + int size; + size = sizeof(struct lpfc_sglq *); + size *= phba->sli4_hba.max_cfg_param.max_xri; + + phba->sli4_hba.lpfc_sglq_active_list = + kzalloc(size, GFP_KERNEL); + if (!phba->sli4_hba.lpfc_sglq_active_list) + return -ENOMEM; + return 0; +} + +/** + * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to walk through the array of active sglq entries + * and free all of the resources. + * This is just a place holder for now. + **/ +static void +lpfc_free_active_sgl(struct lpfc_hba *phba) +{ + kfree(phba->sli4_hba.lpfc_sglq_active_list); +} + +/** + * lpfc_init_sgl_list - Allocate and initialize sgl list. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate and initizlize the driver's sgl + * list and set up the sgl xritag tag array accordingly. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_init_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_sglq *sglq_entry = NULL; + int i; + int els_xri_cnt; + + els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2400 lpfc_init_sgl_list els %d.\n", + els_xri_cnt); + /* Initialize and populate the sglq list per host/VF. 
*/ + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); + + /* Sanity check on XRI management */ + if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2562 No room left for SCSI XRI allocation: " + "max_xri=%d, els_xri=%d\n", + phba->sli4_hba.max_cfg_param.max_xri, + els_xri_cnt); + return -ENOMEM; + } + + /* Allocate memory for the ELS XRI management array */ + phba->sli4_hba.lpfc_els_sgl_array = + kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), + GFP_KERNEL); + + if (!phba->sli4_hba.lpfc_els_sgl_array) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2401 Failed to allocate memory for ELS " + "XRI management array of size %d.\n", + els_xri_cnt); + return -ENOMEM; + } + + /* Keep the SCSI XRIs in the XRI management array */ + phba->sli4_hba.scsi_xri_max = + phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; + phba->sli4_hba.scsi_xri_cnt = 0; + + phba->sli4_hba.lpfc_scsi_psb_array = + kzalloc((sizeof(struct lpfc_scsi_buf *) * + phba->sli4_hba.scsi_xri_max), GFP_KERNEL); + + if (!phba->sli4_hba.lpfc_scsi_psb_array) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2563 Failed to allocate memory for SCSI " + "XRI management array of size %d.\n", + phba->sli4_hba.scsi_xri_max); + kfree(phba->sli4_hba.lpfc_els_sgl_array); + return -ENOMEM; + } + + for (i = 0; i < els_xri_cnt; i++) { + sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); + if (sglq_entry == NULL) { + printk(KERN_ERR "%s: only allocated %d sgls of " + "expected %d count. Unloading driver.\n", + __func__, i, els_xri_cnt); + goto out_free_mem; + } + + sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); + if (sglq_entry->sli4_xritag == NO_XRI) { + kfree(sglq_entry); + printk(KERN_ERR "%s: failed to allocate XRI.\n" + "Unloading driver.\n", __func__); + goto out_free_mem; + } + sglq_entry->buff_type = GEN_BUFF_TYPE; + sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); + if (sglq_entry->virt == NULL) { + kfree(sglq_entry); + printk(KERN_ERR "%s: failed to allocate mbuf.\n" + "Unloading driver.\n", __func__); + goto out_free_mem; + } + sglq_entry->sgl = sglq_entry->virt; + memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); + + /* The list order is used by later block SGL registration */ + spin_lock_irq(&phba->hbalock); + list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); + phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; + phba->sli4_hba.total_sglq_bufs++; + spin_unlock_irq(&phba->hbalock); + } + return 0; + +out_free_mem: + kfree(phba->sli4_hba.lpfc_scsi_psb_array); + lpfc_free_sgl_list(phba); + return -ENOMEM; +} + +/** + * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a PAGE_SIZE memory region to the port to hold up to + * PAGE_SIZE modulo 64 rpi context headers. + * No locks are held here because this is an initialization routine + * called only from probe or lpfc_online when interrupts are not + * enabled and the driver is reinitializing the device. + * + * Return codes + * 0 - successful + * ENOMEM - No available memory + * EIO - The mailbox failed to complete successfully.
+ **/ +int +lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) +{ + int rc = 0; + int longs; + uint16_t rpi_count; + struct lpfc_rpi_hdr *rpi_hdr; + + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); + + /* + * Provision an rpi bitmask range for discovery. The total count + * is the difference between max and base + 1. + */ + rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + + phba->sli4_hba.max_cfg_param.max_rpi - 1; + + longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; + phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), + GFP_KERNEL); + if (!phba->sli4_hba.rpi_bmask) + return -ENOMEM; + + rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); + if (!rpi_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0391 Error during rpi post operation\n"); + lpfc_sli4_remove_rpis(phba); + rc = -ENODEV; + } + + return rc; +} + +/** + * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate a single 4KB memory region to + * support rpis and stores them in the phba. This single region + * provides support for up to 64 rpis. The region is used globally + * by the device. + * + * Returns: + * A valid rpi hdr on success. + * A NULL pointer on any failure. + **/ +struct lpfc_rpi_hdr * +lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) +{ + uint16_t rpi_limit, curr_rpi_range; + struct lpfc_dmabuf *dmabuf; + struct lpfc_rpi_hdr *rpi_hdr; + + rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + + phba->sli4_hba.max_cfg_param.max_rpi - 1; + + spin_lock_irq(&phba->hbalock); + curr_rpi_range = phba->sli4_hba.next_rpi; + spin_unlock_irq(&phba->hbalock); + + /* + * The port has a limited number of rpis. The increment here + * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value + * and to allow the full max_rpi range per port. + */ + if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) + return NULL; + + /* + * First allocate the protocol header region for the port. The + * port expects a 4KB DMA-mapped memory region that is 4K aligned. + */ + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return NULL; + + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + LPFC_HDR_TEMPLATE_SIZE, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + rpi_hdr = NULL; + goto err_free_dmabuf; + } + + memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); + if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { + rpi_hdr = NULL; + goto err_free_coherent; + } + + /* Save the rpi header data for cleanup later. */ + rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); + if (!rpi_hdr) + goto err_free_coherent; + + rpi_hdr->dmabuf = dmabuf; + rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; + rpi_hdr->page_count = 1; + spin_lock_irq(&phba->hbalock); + rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; + list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); + + /* + * The next_rpi stores the next module-64 rpi value to post + * in any subsequent rpi memory region postings. + */ + phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; + spin_unlock_irq(&phba->hbalock); + return rpi_hdr; + + err_free_coherent: + dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, + dmabuf->virt, dmabuf->phys); + err_free_dmabuf: + kfree(dmabuf); + return NULL; +} + +/** + * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to enable the MSI-X interrupt vectors. 
The kernel - * function pci_enable_msix() is called to enable the MSI-X vectors. Note that - * pci_enable_msix(), once invoked, enables either all or nothing, depending - * on the current availability of PCI vector resources. The device driver is - * responsible for calling the individual request_irq() to register each MSI-X - * vector with a interrupt handler, which is done in this function. Note that + * This routine is invoked to remove all memory resources allocated + * to support rpis. This routine presumes the caller has released all + * rpis consumed by fabric or port logins and is prepared to have + * the header pages removed. + **/ +void +lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) +{ + struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; + + list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, + &phba->sli4_hba.lpfc_rpi_hdr_list, list) { + list_del(&rpi_hdr->list); + dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, + rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); + kfree(rpi_hdr->dmabuf); + kfree(rpi_hdr); + } + + phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; + memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); +} + +/** + * lpfc_hba_alloc - Allocate driver hba data structure for a device. + * @pdev: pointer to pci device data structure. + * + * This routine is invoked to allocate the driver hba data structure for an + * HBA device. If the allocation is successful, the phba reference to the + * PCI device data structure is set. + * + * Return codes + * pointer to @phba - sucessful + * NULL - error + **/ +static struct lpfc_hba * +lpfc_hba_alloc(struct pci_dev *pdev) +{ + struct lpfc_hba *phba; + + /* Allocate memory for HBA structure */ + phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); + if (!phba) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1417 Failed to allocate hba struct.\n"); + return NULL; + } + + /* Set reference to PCI device in HBA structure */ + phba->pcidev = pdev; + + /* Assign an unused board number */ + phba->brd_no = lpfc_get_instance(); + if (phba->brd_no < 0) { + kfree(phba); + return NULL; + } + + return phba; +} + +/** + * lpfc_hba_free - Free driver hba data structure with a device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the driver hba data structure with an + * HBA device. + **/ +static void +lpfc_hba_free(struct lpfc_hba *phba) +{ + /* Release the driver assigned board number */ + idr_remove(&lpfc_hba_index, phba->brd_no); + + kfree(phba); + return; +} + +/** + * lpfc_create_shost - Create hba physical port with associated scsi host. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to create HBA physical port and associate a SCSI + * host with it. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_create_shost(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct Scsi_Host *shost; + + /* Initialize HBA FC structure */ + phba->fc_edtov = FF_DEF_EDTOV; + phba->fc_ratov = FF_DEF_RATOV; + phba->fc_altov = FF_DEF_ALTOV; + phba->fc_arbtov = FF_DEF_ARBTOV; + + vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); + if (!vport) + return -ENODEV; + + shost = lpfc_shost_from_vport(vport); + phba->pport = vport; + lpfc_debugfs_initialize(vport); + /* Put reference to SCSI host to driver's device private data */ + pci_set_drvdata(phba->pcidev, shost); + + return 0; +} + +/** + * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. 
+ * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to destroy HBA physical port and the associated + * SCSI host. + **/ +static void +lpfc_destroy_shost(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + + /* Destroy physical port that associated with the SCSI host */ + destroy_port(vport); + + return; +} + +/** + * lpfc_setup_bg - Setup Block guard structures and debug areas. + * @phba: pointer to lpfc hba data structure. + * @shost: the shost to be used to detect Block guard settings. + * + * This routine sets up the local Block guard protocol settings for @shost. + * This routine also allocates memory for debugging bg buffers. + **/ +static void +lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) +{ + int pagecnt = 10; + if (lpfc_prot_mask && lpfc_prot_guard) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1478 Registering BlockGuard with the " + "SCSI layer\n"); + scsi_host_set_prot(shost, lpfc_prot_mask); + scsi_host_set_guard(shost, lpfc_prot_guard); + } + if (!_dump_buf_data) { + while (pagecnt) { + spin_lock_init(&_dump_buf_lock); + _dump_buf_data = + (char *) __get_free_pages(GFP_KERNEL, pagecnt); + if (_dump_buf_data) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_data at 0x%p\n", + (1 << pagecnt), _dump_buf_data); + _dump_buf_data_order = pagecnt; + memset(_dump_buf_data, 0, + ((1 << PAGE_SHIFT) << pagecnt)); + break; + } else + --pagecnt; + } + if (!_dump_buf_data_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + } else + printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" + "\n", _dump_buf_data); + if (!_dump_buf_dif) { + while (pagecnt) { + _dump_buf_dif = + (char *) __get_free_pages(GFP_KERNEL, pagecnt); + if (_dump_buf_dif) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_dif at 0x%p\n", + (1 << pagecnt), _dump_buf_dif); + _dump_buf_dif_order = pagecnt; + memset(_dump_buf_dif, 0, + ((1 << PAGE_SHIFT) << pagecnt)); + break; + } else + --pagecnt; + } + if (!_dump_buf_dif_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + } else + printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", + _dump_buf_dif); +} + +/** + * lpfc_post_init_setup - Perform necessary device post initialization setup. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to perform all the necessary post initialization + * setup for the device. + **/ +static void +lpfc_post_init_setup(struct lpfc_hba *phba) +{ + struct Scsi_Host *shost; + struct lpfc_adapter_event_header adapter_event; + + /* Get the default values for Model Name and Description */ + lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); + + /* + * hba setup may have changed the hba_queue_depth so we need to + * adjust the value of can_queue. 
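+ * For example, with a hypothetical cfg_hba_queue_depth of 512 the + * assignment below would leave can_queue at 502, holding back ten + * commands, presumably for driver-internal use.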
+ */ + shost = pci_get_drvdata(phba->pcidev); + shost->can_queue = phba->cfg_hba_queue_depth - 10; + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) + lpfc_setup_bg(phba, shost); + + lpfc_host_attrib_init(shost); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + spin_lock_irq(shost->host_lock); + lpfc_poll_start_timer(phba); + spin_unlock_irq(shost->host_lock); + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0428 Perform SCSI scan\n"); + /* Send board arrival event to upper layer */ + adapter_event.event_type = FC_REG_ADAPTER_EVENT; + adapter_event.subcategory = LPFC_EVENT_ARRIVAL; + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(adapter_event), + (char *) &adapter_event, + LPFC_NL_VENDOR_ID); + return; +} + +/** + * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the PCI device memory space for device + * with SLI-3 interface spec. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + unsigned long bar0map_len, bar2map_len; + int i, hbq_count; + void *ptr; + int error = -ENODEV; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return error; + else + pdev = phba->pcidev; + + /* Set the device DMA mask size */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) + return error; + + /* Get the bus address of Bar0 and Bar2 and the number of bytes + * required by each mapping. + */ + phba->pci_bar0_map = pci_resource_start(pdev, 0); + bar0map_len = pci_resource_len(pdev, 0); + + phba->pci_bar2_map = pci_resource_start(pdev, 2); + bar2map_len = pci_resource_len(pdev, 2); + + /* Map HBA SLIM to a kernel virtual address. */ + phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->slim_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLIM memory.\n"); + goto out; + } + + /* Map HBA Control Registers to a kernel virtual address. 
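+ * This BAR2 mapping becomes the base from which the HA, CA, HS and HC + * register addresses are derived further down by adding their fixed + * offsets (HA_REG_OFFSET, CA_REG_OFFSET, etc.).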
*/ + phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); + if (!phba->ctrl_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for HBA control registers.\n"); + goto out_iounmap_slim; + } + + /* Allocate memory for SLI-2 structures */ + phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, + SLI2_SLIM_SIZE, + &phba->slim2p.phys, + GFP_KERNEL); + if (!phba->slim2p.virt) + goto out_iounmap; + + memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); + phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); + phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); + phba->IOCBs = (phba->slim2p.virt + + offsetof(struct lpfc_sli2_slim, IOCBs)); + + phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, + lpfc_sli_hbq_size(), + &phba->hbqslimp.phys, + GFP_KERNEL); + if (!phba->hbqslimp.virt) + goto out_free_slim; + + hbq_count = lpfc_sli_hbq_count(); + ptr = phba->hbqslimp.virt; + for (i = 0; i < hbq_count; ++i) { + phba->hbqs[i].hbq_virt = ptr; + INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); + ptr += (lpfc_hbq_defs[i]->entry_count * + sizeof(struct lpfc_hbq_entry)); + } + phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; + phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; + + memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); + + INIT_LIST_HEAD(&phba->rb_pend_list); + + phba->MBslimaddr = phba->slim_memmap_p; + phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; + phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; + phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; + phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; + + return 0; + +out_free_slim: + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); +out_iounmap: + iounmap(phba->ctrl_regs_memmap_p); +out_iounmap_slim: + iounmap(phba->slim_memmap_p); +out: + return error; +} + +/** + * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the PCI device memory space for device + * with SLI-3 interface spec. + **/ +static void +lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return; + else + pdev = phba->pcidev; + + /* Free coherent DMA memory allocated */ + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), + phba->hbqslimp.virt, phba->hbqslimp.phys); + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); + + /* I/O memory unmap */ + iounmap(phba->ctrl_regs_memmap_p); + iounmap(phba->slim_memmap_p); + + return; +} + +/** + * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to wait for SLI4 device Power On Self Test (POST) + * done and check status. + * + * Return 0 if successful, otherwise -ENODEV. 
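+ * Note: the POST wait below is implemented as up to 3000 reads of the + * status register with an msleep(10) between them, which is where the + * 30 second budget comes from.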
+ **/ +int +lpfc_sli4_post_status_check(struct lpfc_hba *phba) +{ + struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; + uint32_t onlnreg0, onlnreg1; + int i, port_error = -ENODEV; + + if (!phba->sli4_hba.STAregaddr) + return -ENODEV; + + /* With uncoverable error, log the error message and return error */ + onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); + onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); + if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { + uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); + uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); + if (uerrlo_reg.word0 || uerrhi_reg.word0) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1422 HBA Unrecoverable error: " + "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " + "online0_reg=0x%x, online1_reg=0x%x\n", + uerrlo_reg.word0, uerrhi_reg.word0, + onlnreg0, onlnreg1); + } + return -ENODEV; + } + + /* Wait up to 30 seconds for the SLI Port POST done and ready */ + for (i = 0; i < 3000; i++) { + sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); + /* Encounter fatal POST error, break out */ + if (bf_get(lpfc_hst_state_perr, &sta_reg)) { + port_error = -ENODEV; + break; + } + if (LPFC_POST_STAGE_ARMFW_READY == + bf_get(lpfc_hst_state_port_status, &sta_reg)) { + port_error = 0; + break; + } + msleep(10); + } + + if (port_error) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1408 Failure HBA POST Status: sta_reg=0x%x, " + "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " + "dl=x%x, pstatus=x%x\n", sta_reg.word0, + bf_get(lpfc_hst_state_perr, &sta_reg), + bf_get(lpfc_hst_state_sfi, &sta_reg), + bf_get(lpfc_hst_state_nip, &sta_reg), + bf_get(lpfc_hst_state_ipc, &sta_reg), + bf_get(lpfc_hst_state_xrom, &sta_reg), + bf_get(lpfc_hst_state_dl, &sta_reg), + bf_get(lpfc_hst_state_port_status, &sta_reg)); + + /* Log device information */ + scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " + "FeatureL1=0x%x, FeatureL2=0x%x\n", + bf_get(lpfc_scratchpad_chiptype, &scratchpad), + bf_get(lpfc_scratchpad_slirev, &scratchpad), + bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), + bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); + + return port_error; +} + +/** + * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up SLI4 BAR0 PCI config space register + * memory map. + **/ +static void +lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) +{ + phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_UERR_STATUS_LO; + phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_UERR_STATUS_HI; + phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_ONLINE0; + phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_ONLINE1; + phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + + LPFC_SCRATCHPAD; +} + +/** + * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up SLI4 BAR1 control status register (CSR) + * memory map. 
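+ * Each CSR address is computed as the BAR1 mapping plus a fixed offset, + * for instance STAregaddr = ctrl_regs_memmap_p + LPFC_HST_STATE; the ISR, + * IMR and ISCR registers follow the same pattern.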
+ **/ +static void +lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) +{ + + phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_STATE; + phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_ISR0; + phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_IMR0; + phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + + LPFC_HST_ISCR0; + return; +} + +/** + * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. + * @phba: pointer to lpfc hba data structure. + * @vf: virtual function number + * + * This routine is invoked to set up SLI4 BAR2 doorbell register memory map + * based on the given viftual function number, @vf. + * + * Return 0 if successful, otherwise -ENODEV. + **/ +static int +lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) +{ + if (vf > LPFC_VIR_FUNC_MAX) + return -ENODEV; + + phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); + phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); + phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); + phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); + phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + + vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); + return 0; +} + +/** + * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to create the bootstrap mailbox + * region consistent with the SLI-4 interface spec. This + * routine allocates all memory necessary to communicate + * mailbox commands to the port and sets up all alignment + * needs. No locks are expected to be held when calling + * this routine. + * + * Return codes + * 0 - sucessful + * ENOMEM - could not allocated memory. + **/ +static int +lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) +{ + uint32_t bmbx_size; + struct lpfc_dmabuf *dmabuf; + struct dma_address *dma_address; + uint32_t pa_addr; + uint64_t phys_addr; + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + /* + * The bootstrap mailbox region is comprised of 2 parts + * plus an alignment restriction of 16 bytes. + */ + bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + bmbx_size, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + return -ENOMEM; + } + memset(dmabuf->virt, 0, bmbx_size); + + /* + * Initialize the bootstrap mailbox pointers now so that the register + * operations are simple later. The mailbox dma address is required + * to be 16-byte aligned. Also align the virtual memory as each + * maibox is copied into the bmbx mailbox region before issuing the + * command to the port. + */ + phba->sli4_hba.bmbx.dmabuf = dmabuf; + phba->sli4_hba.bmbx.bmbx_size = bmbx_size; + + phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, + LPFC_ALIGN_16_BYTE); + phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, + LPFC_ALIGN_16_BYTE); + + /* + * Set the high and low physical addresses now. The SLI4 alignment + * requirement is 16 bytes and the mailbox is posted to the port + * as two 30-bit addresses. The other data is a bit marking whether + * the 30-bit address is the high or low address. 
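+ * In other words, bits 63:34 of the aligned physical address become the + * high word and bits 33:4 the low word; the 16-byte alignment guarantees + * bits 3:0 are zero, so nothing is lost. Each 30-bit value is then + * shifted left by 2 and tagged with its BIT1_ADDR marker.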
+ * Upcast bmbx aphys to 64bits so shift instruction compiles + * clean on 32 bit machines. + */ + dma_address = &phba->sli4_hba.bmbx.dma_address; + phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; + pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); + dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | + LPFC_BMBX_BIT1_ADDR_HI); + + pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); + dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | + LPFC_BMBX_BIT1_ADDR_LO); + return 0; +} + +/** + * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to teardown the bootstrap mailbox + * region and release all host resources. This routine requires + * the caller to ensure all mailbox commands recovered, no + * additional mailbox comands are sent, and interrupts are disabled + * before calling this routine. + * + **/ +static void +lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) +{ + dma_free_coherent(&phba->pcidev->dev, + phba->sli4_hba.bmbx.bmbx_size, + phba->sli4_hba.bmbx.dmabuf->virt, + phba->sli4_hba.bmbx.dmabuf->phys); + + kfree(phba->sli4_hba.bmbx.dmabuf); + memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); +} + +/** + * lpfc_sli4_read_config - Get the config parameters. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to read the configuration parameters from the HBA. + * The configuration parameters are used to set the base and maximum values + * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource + * allocation for the port. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. + **/ +static int +lpfc_sli4_read_config(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *pmb; + struct lpfc_mbx_read_config *rd_config; + uint32_t rc = 0; + + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2011 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } + + lpfc_read_config(phba, pmb); + + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2012 Mailbox failed , mbxCmd x%x " + "READ_CONFIG, mbxStatus x%x\n", + bf_get(lpfc_mqe_command, &pmb->u.mqe), + bf_get(lpfc_mqe_status, &pmb->u.mqe)); + rc = -EIO; + } else { + rd_config = &pmb->u.mqe.un.rd_config; + phba->sli4_hba.max_cfg_param.max_xri = + bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); + phba->sli4_hba.max_cfg_param.xri_base = + bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); + phba->sli4_hba.max_cfg_param.max_vpi = + bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + phba->sli4_hba.max_cfg_param.vpi_base = + bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_rpi = + bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); + phba->sli4_hba.max_cfg_param.rpi_base = + bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_vfi = + bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); + phba->sli4_hba.max_cfg_param.vfi_base = + bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_fcfi = + bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); + phba->sli4_hba.max_cfg_param.fcfi_base = + bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); + phba->sli4_hba.max_cfg_param.max_eq = + bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_rq 
= + bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_wq = + bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); + phba->sli4_hba.max_cfg_param.max_cq = + bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); + phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); + phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; + phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; + phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; + phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; + phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; + phba->max_vports = phba->max_vpi; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2003 cfg params XRI(B:%d M:%d), " + "VPI(B:%d M:%d) " + "VFI(B:%d M:%d) " + "RPI(B:%d M:%d) " + "FCFI(B:%d M:%d)\n", + phba->sli4_hba.max_cfg_param.xri_base, + phba->sli4_hba.max_cfg_param.max_xri, + phba->sli4_hba.max_cfg_param.vpi_base, + phba->sli4_hba.max_cfg_param.max_vpi, + phba->sli4_hba.max_cfg_param.vfi_base, + phba->sli4_hba.max_cfg_param.max_vfi, + phba->sli4_hba.max_cfg_param.rpi_base, + phba->sli4_hba.max_cfg_param.max_rpi, + phba->sli4_hba.max_cfg_param.fcfi_base, + phba->sli4_hba.max_cfg_param.max_fcfi); + } + mempool_free(pmb, phba->mbox_mem_pool); + + /* Reset the DFT_HBA_Q_DEPTH to the max xri */ + if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) + phba->cfg_hba_queue_depth = + phba->sli4_hba.max_cfg_param.max_xri; + return rc; +} + +/** + * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to setup the host-side endian order to the + * HBA consistent with the SLI-4 interface spec. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. + **/ +static int +lpfc_setup_endian_order(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + uint32_t rc = 0; + uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, + HOST_ENDIAN_HIGH_WORD1}; + + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0492 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } + + /* + * The SLI4_CONFIG_SPECIAL mailbox command requires the first two + * words to contain special data values and no other data. + */ + memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); + memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0493 SLI_CONFIG_SPECIAL mailbox failed with " + "status x%x\n", + rc); + rc = -EIO; + } + + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} + +/** + * lpfc_sli4_queue_create - Create all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as place holder. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. 
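+ * A sketch of the topology allocated here (and later wired up in + * lpfc_sli4_queue_setup), with counts taken from the adjusted config: + * sp_eq feeds mbx_cq, els_cq and rxq_cq; fp_eq[i] feeds fcp_cq[i] for + * each of the cfg_fcp_eq_count fast paths; mbx_wq (MQ), els_wq, the + * cfg_fcp_wq_count FCP WQs and the hdr_rq/dat_rq pair complete the set.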
+ **/ +static int +lpfc_sli4_queue_create(struct lpfc_hba *phba) +{ + struct lpfc_queue *qdesc; + int fcp_eqidx, fcp_cqidx, fcp_wqidx; + int cfg_fcp_wq_count; + int cfg_fcp_eq_count; + + /* + * Sanity check for confiugred queue parameters against the run-time + * device parameters + */ + + /* Sanity check on FCP fast-path WQ parameters */ + cfg_fcp_wq_count = phba->cfg_fcp_wq_count; + if (cfg_fcp_wq_count > + (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { + cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - + LPFC_SP_WQN_DEF; + if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2581 Not enough WQs (%d) from " + "the pci function for supporting " + "FCP WQs (%d)\n", + phba->sli4_hba.max_cfg_param.max_wq, + phba->cfg_fcp_wq_count); + goto out_error; + } + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2582 Not enough WQs (%d) from the pci " + "function for supporting the requested " + "FCP WQs (%d), the actual FCP WQs can " + "be supported: %d\n", + phba->sli4_hba.max_cfg_param.max_wq, + phba->cfg_fcp_wq_count, cfg_fcp_wq_count); + } + /* The actual number of FCP work queues adopted */ + phba->cfg_fcp_wq_count = cfg_fcp_wq_count; + + /* Sanity check on FCP fast-path EQ parameters */ + cfg_fcp_eq_count = phba->cfg_fcp_eq_count; + if (cfg_fcp_eq_count > + (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { + cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - + LPFC_SP_EQN_DEF; + if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2574 Not enough EQs (%d) from the " + "pci function for supporting FCP " + "EQs (%d)\n", + phba->sli4_hba.max_cfg_param.max_eq, + phba->cfg_fcp_eq_count); + goto out_error; + } + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2575 Not enough EQs (%d) from the pci " + "function for supporting the requested " + "FCP EQs (%d), the actual FCP EQs can " + "be supported: %d\n", + phba->sli4_hba.max_cfg_param.max_eq, + phba->cfg_fcp_eq_count, cfg_fcp_eq_count); + } + /* It does not make sense to have more EQs than WQs */ + if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2593 The number of FCP EQs (%d) is more " + "than the number of FCP WQs (%d), take " + "the number of FCP EQs same as than of " + "WQs (%d)\n", cfg_fcp_eq_count, + phba->cfg_fcp_wq_count, + phba->cfg_fcp_wq_count); + cfg_fcp_eq_count = phba->cfg_fcp_wq_count; + } + /* The actual number of FCP event queues adopted */ + phba->cfg_fcp_eq_count = cfg_fcp_eq_count; + /* The overall number of event queues used */ + phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; + + /* + * Create Event Queues (EQs) + */ + + /* Get EQ depth from module parameter, fake the default for now */ + phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; + phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; + + /* Create slow path event queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0496 Failed allocate slow-path EQ\n"); + goto out_error; + } + phba->sli4_hba.sp_eq = qdesc; + + /* Create fast-path FCP Event Queue(s) */ + phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fp_eq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2576 Failed allocate memory for fast-path " + "EQ record array\n"); + goto out_free_sp_eq; + } + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + 
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, + phba->sli4_hba.eq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0497 Failed allocate fast-path EQ\n"); + goto out_free_fp_eq; + } + phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; + } + + /* + * Create Complete Queues (CQs) + */ + + /* Get CQ depth from module parameter, fake the default for now */ + phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; + phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; + + /* Create slow-path Mailbox Command Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0500 Failed allocate slow-path mailbox CQ\n"); + goto out_free_fp_eq; + } + phba->sli4_hba.mbx_cq = qdesc; + + /* Create slow-path ELS Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0501 Failed allocate slow-path ELS CQ\n"); + goto out_free_mbx_cq; + } + phba->sli4_hba.els_cq = qdesc; + + /* Create slow-path Unsolicited Receive Complete Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0502 Failed allocate slow-path USOL RX CQ\n"); + goto out_free_els_cq; + } + phba->sli4_hba.rxq_cq = qdesc; + + /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ + phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fcp_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2577 Failed allocate memory for fast-path " + "CQ record array\n"); + goto out_free_rxq_cq; + } + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, + phba->sli4_hba.cq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0499 Failed allocate fast-path FCP " + "CQ (%d)\n", fcp_cqidx); + goto out_free_fcp_cq; + } + phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; + } + + /* Create Mailbox Command Queue */ + phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; + phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; + + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, + phba->sli4_hba.mq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0505 Failed allocate slow-path MQ\n"); + goto out_free_fcp_cq; + } + phba->sli4_hba.mbx_wq = qdesc; + + /* + * Create all the Work Queues (WQs) + */ + phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; + phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; + + /* Create slow-path ELS Work Queue */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0504 Failed allocate slow-path ELS WQ\n"); + goto out_free_mbx_wq; + } + phba->sli4_hba.els_wq = qdesc; + + /* Create fast-path FCP Work Queue(s) */ + phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_wq_count), GFP_KERNEL); + if (!phba->sli4_hba.fcp_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2578 Failed allocate memory for fast-path " + "WQ record array\n"); + goto out_free_els_wq; + } + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, + phba->sli4_hba.wq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0503 Failed allocate 
fast-path FCP " + "WQ (%d)\n", fcp_wqidx); + goto out_free_fcp_wq; + } + phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; + } + + /* + * Create Receive Queue (RQ) + */ + phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; + phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; + + /* Create Receive Queue for header */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0506 Failed allocate receive HRQ\n"); + goto out_free_fcp_wq; + } + phba->sli4_hba.hdr_rq = qdesc; + + /* Create Receive Queue for data */ + qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, + phba->sli4_hba.rq_ecount); + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0507 Failed allocate receive DRQ\n"); + goto out_free_hdr_rq; + } + phba->sli4_hba.dat_rq = qdesc; + + return 0; + +out_free_hdr_rq: + lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); + phba->sli4_hba.hdr_rq = NULL; +out_free_fcp_wq: + for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { + lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); + phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; + } + kfree(phba->sli4_hba.fcp_wq); +out_free_els_wq: + lpfc_sli4_queue_free(phba->sli4_hba.els_wq); + phba->sli4_hba.els_wq = NULL; +out_free_mbx_wq: + lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); + phba->sli4_hba.mbx_wq = NULL; +out_free_fcp_cq: + for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { + lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); + phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; + } + kfree(phba->sli4_hba.fcp_cq); +out_free_rxq_cq: + lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); + phba->sli4_hba.rxq_cq = NULL; +out_free_els_cq: + lpfc_sli4_queue_free(phba->sli4_hba.els_cq); + phba->sli4_hba.els_cq = NULL; +out_free_mbx_cq: + lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); + phba->sli4_hba.mbx_cq = NULL; +out_free_fp_eq: + for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { + lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); + phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; + } + kfree(phba->sli4_hba.fp_eq); +out_free_sp_eq: + lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); + phba->sli4_hba.sp_eq = NULL; +out_error: + return -ENOMEM; +} + +/** + * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release all the SLI4 queues with the FCoE HBA + * operation. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. 
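+ * Note: teardown runs roughly in reverse of creation: work and receive + * queues are released first, then the completion queues, and the event + * queues last.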
+ **/ +static void +lpfc_sli4_queue_destroy(struct lpfc_hba *phba) +{ + int fcp_qidx; + + /* Release mailbox command work queue */ + lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); + phba->sli4_hba.mbx_wq = NULL; + + /* Release ELS work queue */ + lpfc_sli4_queue_free(phba->sli4_hba.els_wq); + phba->sli4_hba.els_wq = NULL; + + /* Release FCP work queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) + lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); + kfree(phba->sli4_hba.fcp_wq); + phba->sli4_hba.fcp_wq = NULL; + + /* Release unsolicited receive queue */ + lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); + phba->sli4_hba.hdr_rq = NULL; + lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); + phba->sli4_hba.dat_rq = NULL; + + /* Release unsolicited receive complete queue */ + lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); + phba->sli4_hba.rxq_cq = NULL; + + /* Release ELS complete queue */ + lpfc_sli4_queue_free(phba->sli4_hba.els_cq); + phba->sli4_hba.els_cq = NULL; + + /* Release mailbox command complete queue */ + lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); + phba->sli4_hba.mbx_cq = NULL; + + /* Release FCP response complete queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); + kfree(phba->sli4_hba.fcp_cq); + phba->sli4_hba.fcp_cq = NULL; + + /* Release fast-path event queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); + kfree(phba->sli4_hba.fp_eq); + phba->sli4_hba.fp_eq = NULL; + + /* Release slow-path event queue */ + lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); + phba->sli4_hba.sp_eq = NULL; + + return; +} + +/** + * lpfc_sli4_queue_setup - Set up all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up all the SLI4 queues for the FCoE HBA + * operation. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. 
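+ * Note: FCP work queues are attached to completion queues round-robin via + * fcp_cq_index = (fcp_cq_index + 1) % cfg_fcp_eq_count, so with, say, + * four FCP WQs and two FCP EQs, WQs 0-3 would land on CQs 0, 1, 0, 1.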
+ **/ +int +lpfc_sli4_queue_setup(struct lpfc_hba *phba) +{ + int rc = -ENOMEM; + int fcp_eqidx, fcp_cqidx, fcp_wqidx; + int fcp_cq_index = 0; + + /* + * Set up Event Queues (EQs) + */ + + /* Set up slow-path event queue */ + if (!phba->sli4_hba.sp_eq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0520 Slow-path EQ not allocated\n"); + goto out_error; + } + rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, + LPFC_SP_DEF_IMAX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0521 Failed setup of slow-path EQ: " + "rc = 0x%x\n", rc); + goto out_error; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2583 Slow-path EQ setup: queue-id=%d\n", + phba->sli4_hba.sp_eq->queue_id); + + /* Set up fast-path event queue */ + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0522 Fast-path EQ (%d) not " + "allocated\n", fcp_eqidx); + goto out_destroy_fp_eq; + } + rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], + phba->cfg_fcp_imax); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0523 Failed setup of fast-path EQ " + "(%d), rc = 0x%x\n", fcp_eqidx, rc); + goto out_destroy_fp_eq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2584 Fast-path EQ setup: " + "queue[%d]-id=%d\n", fcp_eqidx, + phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); + } + + /* + * Set up Complete Queues (CQs) + */ + + /* Set up slow-path MBOX Complete Queue as the first CQ */ + if (!phba->sli4_hba.mbx_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0528 Mailbox CQ not allocated\n"); + goto out_destroy_fp_eq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, + LPFC_MCQ, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0529 Failed setup of slow-path mailbox CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_fp_eq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.mbx_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up slow-path ELS Complete Queue */ + if (!phba->sli4_hba.els_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0530 ELS CQ not allocated\n"); + goto out_destroy_mbx_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, + LPFC_WCQ, LPFC_ELS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0531 Failed setup of slow-path ELS CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_mbx_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.els_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up slow-path Unsolicited Receive Complete Queue */ + if (!phba->sli4_hba.rxq_cq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0532 USOL RX CQ not allocated\n"); + goto out_destroy_els_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq, + LPFC_RCQ, LPFC_USOL); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0533 Failed setup of slow-path USOL RX CQ: " + "rc = 0x%x\n", rc); + goto out_destroy_els_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n", + phba->sli4_hba.rxq_cq->queue_id, + phba->sli4_hba.sp_eq->queue_id); + + /* Set up fast-path FCP Response Complete Queue */ + for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { + if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0526 Fast-path FCP CQ (%d) not " + 
"allocated\n", fcp_cqidx); + goto out_destroy_fcp_cq; + } + rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], + phba->sli4_hba.fp_eq[fcp_cqidx], + LPFC_WCQ, LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0527 Failed setup of fast-path FCP " + "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); + goto out_destroy_fcp_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2588 FCP CQ setup: cq[%d]-id=%d, " + "parent eq[%d]-id=%d\n", + fcp_cqidx, + phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, + fcp_cqidx, + phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); + } + + /* + * Set up all the Work Queues (WQs) + */ + + /* Set up Mailbox Command Queue */ + if (!phba->sli4_hba.mbx_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0538 Slow-path MQ not allocated\n"); + goto out_destroy_fcp_cq; + } + rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, + phba->sli4_hba.mbx_cq, LPFC_MBOX); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0539 Failed setup of slow-path MQ: " + "rc = 0x%x\n", rc); + goto out_destroy_fcp_cq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_cq->queue_id); + + /* Set up slow-path ELS Work Queue */ + if (!phba->sli4_hba.els_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0536 Slow-path ELS WQ not allocated\n"); + goto out_destroy_mbx_wq; + } + rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, + phba->sli4_hba.els_cq, LPFC_ELS); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0537 Failed setup of slow-path ELS WQ: " + "rc = 0x%x\n", rc); + goto out_destroy_mbx_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", + phba->sli4_hba.els_wq->queue_id, + phba->sli4_hba.els_cq->queue_id); + + /* Set up fast-path FCP Work Queue */ + for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { + if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0534 Fast-path FCP WQ (%d) not " + "allocated\n", fcp_wqidx); + goto out_destroy_fcp_wq; + } + rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], + phba->sli4_hba.fcp_cq[fcp_cq_index], + LPFC_FCP); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0535 Failed setup of fast-path FCP " + "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); + goto out_destroy_fcp_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2591 FCP WQ setup: wq[%d]-id=%d, " + "parent cq[%d]-id=%d\n", + fcp_wqidx, + phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, + fcp_cq_index, + phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); + /* Round robin FCP Work Queue's Completion Queue assignment */ + fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); + } + + /* + * Create Receive Queue (RQ) + */ + if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0540 Receive Queue not allocated\n"); + goto out_destroy_fcp_wq; + } + rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, + phba->sli4_hba.rxq_cq, LPFC_USOL); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0541 Failed setup of Receive Queue: " + "rc = 0x%x\n", rc); + goto out_destroy_fcp_wq; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " + "parent cq-id=%d\n", + phba->sli4_hba.hdr_rq->queue_id, + phba->sli4_hba.dat_rq->queue_id, + phba->sli4_hba.rxq_cq->queue_id); + return 0; + +out_destroy_fcp_wq: + for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); + lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); +out_destroy_mbx_wq: + lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); +out_destroy_fcp_cq: + for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); + lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); +out_destroy_els_cq: + lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); +out_destroy_mbx_cq: + lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); +out_destroy_fp_eq: + for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) + lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); + lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); +out_error: + return rc; +} + +/** + * lpfc_sli4_queue_unset - Unset all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset all the SLI4 queues with the FCoE HBA + * operation. + * + * Return codes + * 0 - sucessful + * ENOMEM - No availble memory + * EIO - The mailbox failed to complete successfully. + **/ +void +lpfc_sli4_queue_unset(struct lpfc_hba *phba) +{ + int fcp_qidx; + + /* Unset mailbox command work queue */ + lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); + /* Unset ELS work queue */ + lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); + /* Unset unsolicited receive queue */ + lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); + /* Unset FCP work queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) + lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); + /* Unset mailbox command complete queue */ + lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); + /* Unset ELS complete queue */ + lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); + /* Unset unsolicited receive complete queue */ + lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); + /* Unset FCP response complete queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); + /* Unset fast-path event queue */ + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); + /* Unset slow-path event queue */ + lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); +} + +/** + * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate and set up a pool of completion queue + * events. The body of the completion queue event is a completion queue entry + * CQE. For now, this pool is used for the interrupt service routine to queue + * the following HBA completion queue events for the worker thread to process: + * - Mailbox asynchronous events + * - Receive queue completion unsolicited events + * Later, this can be used for all the slow-path events. + * + * Return codes + * 0 - sucessful + * -ENOMEM - No availble memory + **/ +static int +lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + int i; + + for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { + cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); + if (!cq_event) + goto out_pool_create_fail; + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_cqe_event_pool); + } + return 0; + +out_pool_create_fail: + lpfc_sli4_cq_event_pool_destroy(phba); + return -ENOMEM; +} + +/** + * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool + * @phba: pointer to lpfc hba data structure. 
+ * + * This routine is invoked to free the pool of completion queue events at + * driver unload time. Note that it is the responsibility of the driver + * cleanup routine to free all the outstanding completion-queue events + * allocated from this pool back into the pool before invoking this routine + * to destroy the pool. + **/ +static void +lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event, *next_cq_event; + + list_for_each_entry_safe(cq_event, next_cq_event, + &phba->sli4_hba.sp_cqe_event_pool, list) { + list_del(&cq_event->list); + kfree(cq_event); + } +} + +/** + * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is the lock free version of the API invoked to allocate a + * completion-queue event from the free pool. + * + * Return: Pointer to the newly allocated completion-queue event if successful + * NULL otherwise. + **/ +struct lpfc_cq_event * +__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event = NULL; + + list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, + struct lpfc_cq_event, list); + return cq_event; +} + +/** + * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is the lock version of the API invoked to allocate a + * completion-queue event from the free pool. + * + * Return: Pointer to the newly allocated completion-queue event if successful + * NULL otherwise. + **/ +struct lpfc_cq_event * +lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + cq_event = __lpfc_sli4_cq_event_alloc(phba); + spin_unlock_irqrestore(&phba->hbalock, iflags); + return cq_event; +} + +/** + * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool + * @phba: pointer to lpfc hba data structure. + * @cq_event: pointer to the completion queue event to be freed. + * + * This routine is the lock free version of the API invoked to release a + * completion-queue event back into the free pool. + **/ +void +__lpfc_sli4_cq_event_release(struct lpfc_hba *phba, + struct lpfc_cq_event *cq_event) +{ + list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); +} + +/** + * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool + * @phba: pointer to lpfc hba data structure. + * @cq_event: pointer to the completion queue event to be freed. + * + * This routine is the lock version of the API invoked to release a + * completion-queue event back into the free pool. + **/ +void +lpfc_sli4_cq_event_release(struct lpfc_hba *phba, + struct lpfc_cq_event *cq_event) +{ + unsigned long iflags; + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_sli4_cq_event_release(phba, cq_event); + spin_unlock_irqrestore(&phba->hbalock, iflags); +} + +/** + * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free all pending completion-queue events back + * into the free pool for device reset. 
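+ * The pattern below splices all three pending lists onto one private list + * under a single hold of hbalock, then hands the events back one by one + * through the locked lpfc_sli4_cq_event_release() API.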
+ **/ +static void +lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) +{ + LIST_HEAD(cqelist); + struct lpfc_cq_event *cqe; + unsigned long iflags; + + /* Retrieve all the pending WCQEs from pending WCQE lists */ + spin_lock_irqsave(&phba->hbalock, iflags); + /* Pending FCP XRI abort events */ + list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, + &cqelist); + /* Pending ELS XRI abort events */ + list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, + &cqelist); + /* Pending async events */ + list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, + &cqelist); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + while (!list_empty(&cqelist)) { + list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); + lpfc_sli4_cq_event_release(phba, cqe); + } +} + +/** + * lpfc_pci_function_reset - Reset pci function. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to request a PCI function reset. It destroys + * all resources assigned to the PCI function that originates this request. + * + * Return codes + * 0 - successful + * ENOMEM - No available memory + * EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_pci_function_reset(struct lpfc_hba *phba) +{ + LPFC_MBOXQ_t *mboxq; + uint32_t rc = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0494 Unable to allocate memory for issuing " + "SLI_FUNCTION_RESET mailbox command\n"); + return -ENOMEM; + } + + /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, + LPFC_SLI4_MBX_EMBED); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0495 SLI_FUNCTION_RESET mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands + * @phba: pointer to lpfc hba data structure. + * @cnt: number of nop mailbox commands to send. + * + * This routine is invoked to send @cnt NOP mailbox commands and + * wait for each command to complete. + * + * Return: the number of NOP mailbox commands completed. 
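+ * Each NOP is issued by polling when interrupts are not yet enabled and + * through lpfc_sli_issue_mbox_wait() otherwise; the loop stops at the + * first timeout or bad status, so the value returned can be smaller + * than @cnt.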
+ **/ +static int +lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) +{ + LPFC_MBOXQ_t *mboxq; + int length, cmdsent; + uint32_t mbox_tmo; + uint32_t rc = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (cnt == 0) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2518 Requested to send 0 NOP mailbox cmd\n"); + return cnt; + } + + mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2519 Unable to allocate memory for issuing " + "NOP mailbox command\n"); + return 0; + } + + /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ + length = (sizeof(struct lpfc_mbx_nop) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); + + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + for (cmdsent = 0; cmdsent < cnt; cmdsent++) { + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + if (rc == MBX_TIMEOUT) + break; + /* Check return status */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, + &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "2520 NOP mailbox command failed " + "status x%x add_status x%x mbx " + "status x%x\n", shdr_status, + shdr_add_status, rc); + break; + } + } + + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + + return cmdsent; +} + +/** + * lpfc_sli4_fcfi_unreg - Unregister fcfi to device + * @phba: pointer to lpfc hba data structure. + * @fcfi: fcf index. + * + * This routine is invoked to unregister a FCFI from device. + **/ +void +lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) +{ + LPFC_MBOXQ_t *mbox; + uint32_t mbox_tmo; + int rc; + unsigned long flags; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + + if (!mbox) + return; + + lpfc_unreg_fcfi(mbox, fcfi); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2517 Unregister FCFI command failed " + "status %d, mbxStatus x%x\n", rc, + bf_get(lpfc_mqe_status, &mbox->u.mqe)); + else { + spin_lock_irqsave(&phba->hbalock, flags); + /* Mark the FCFI is no longer registered */ + phba->fcf.fcf_flag &= + ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); + spin_unlock_irqrestore(&phba->hbalock, flags); + } +} + +/** + * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up the PCI device memory space for device + * with SLI-4 interface spec. 
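+ * Three regions are mapped: the SLI4 config registers (LPFC_SLI4_BAR0), + * the control/CSR registers (LPFC_SLI4_BAR1) and the doorbell registers + * (LPFC_SLI4_BAR2), with the error path unwinding the mappings in + * reverse order.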
+ * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + unsigned long bar0map_len, bar1map_len, bar2map_len; + int error = -ENODEV; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return error; + else + pdev = phba->pcidev; + + /* Set the device DMA mask size */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) + return error; + + /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the + * number of bytes required by each mapping. They are actually + * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. + */ + phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); + bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); + + phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); + bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); + + phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); + bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); + + /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ + phba->sli4_hba.conf_regs_memmap_p = + ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->sli4_hba.conf_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLI4 PCI config registers.\n"); + goto out; + } + + /* Map SLI4 HBA Control Register base to a kernel virtual address. */ + phba->sli4_hba.ctrl_regs_memmap_p = + ioremap(phba->pci_bar1_map, bar1map_len); + if (!phba->sli4_hba.ctrl_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLI4 HBA control registers.\n"); + goto out_iounmap_conf; + } + + /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ + phba->sli4_hba.drbl_regs_memmap_p = + ioremap(phba->pci_bar2_map, bar2map_len); + if (!phba->sli4_hba.drbl_regs_memmap_p) { + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLI4 HBA doorbell registers.\n"); + goto out_iounmap_ctrl; + } + + /* Set up BAR0 PCI config space register memory map */ + lpfc_sli4_bar0_register_memmap(phba); + + /* Set up BAR1 register memory map */ + lpfc_sli4_bar1_register_memmap(phba); + + /* Set up BAR2 register memory map */ + error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); + if (error) + goto out_iounmap_all; + + return 0; + +out_iounmap_all: + iounmap(phba->sli4_hba.drbl_regs_memmap_p); +out_iounmap_ctrl: + iounmap(phba->sli4_hba.ctrl_regs_memmap_p); +out_iounmap_conf: + iounmap(phba->sli4_hba.conf_regs_memmap_p); +out: + return error; +} + +/** + * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the PCI device memory space for device + * with SLI-4 interface spec. + **/ +static void +lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) +{ + struct pci_dev *pdev; + + /* Obtain PCI device reference */ + if (!phba->pcidev) + return; + else + pdev = phba->pcidev; + + /* Free coherent DMA memory allocated */ + + /* Unmap I/O memory space */ + iounmap(phba->sli4_hba.drbl_regs_memmap_p); + iounmap(phba->sli4_hba.ctrl_regs_memmap_p); + iounmap(phba->sli4_hba.conf_regs_memmap_p); + + return; +} + +/** + * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI-X interrupt vectors to device + * with SLI-3 interface specs. The kernel function pci_enable_msix() is + * called to enable the MSI-X vectors. 
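lpfc_sli4_pci_mem_setup() above acquires three BAR mappings in sequence and unwinds them through a chain of goto labels, so each failure path releases exactly what was already mapped, in reverse order. A compact standalone illustration of that idiom, with malloc() standing in for ioremap() (names are made up):

#include <stdlib.h>

/*
 * Acquire three "regions" in order; on failure, fall through the labels
 * so everything already acquired is released in reverse order.
 */
static int setup_regions(void **conf, void **ctrl, void **drbl)
{
	*conf = malloc(64);		/* stands in for ioremap(bar0) */
	if (!*conf)
		goto out;
	*ctrl = malloc(64);		/* stands in for ioremap(bar1) */
	if (!*ctrl)
		goto out_free_conf;
	*drbl = malloc(64);		/* stands in for ioremap(bar2) */
	if (!*drbl)
		goto out_free_ctrl;
	return 0;

out_free_ctrl:
	free(*ctrl);
out_free_conf:
	free(*conf);
out:
	return -1;
}

int main(void)
{
	void *conf, *ctrl, *drbl;

	if (setup_regions(&conf, &ctrl, &drbl) == 0) {
		free(drbl);
		free(ctrl);
		free(conf);
	}
	return 0;
}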
Note that pci_enable_msix(), once + * invoked, enables either all or nothing, depending on the current + * availability of PCI vector resources. The device driver is responsible + * for calling the individual request_irq() to register each MSI-X vector + * with a interrupt handler, which is done in this function. Note that * later when device is unloading, the driver should always call free_irq() * on all MSI-X vectors it has done request_irq() on before calling * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device @@ -2333,7 +6029,7 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) * other values - error **/ static int -lpfc_enable_msix(struct lpfc_hba *phba) +lpfc_sli_enable_msix(struct lpfc_hba *phba) { int rc, i; LPFC_MBOXQ_t *pmb; @@ -2349,20 +6045,21 @@ lpfc_enable_msix(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0420 PCI enable MSI-X failed (%d)\n", rc); goto msi_fail_out; - } else - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0477 MSI-X entry[%d]: vector=x%x " - "message=%d\n", i, - phba->msix_entries[i].vector, - phba->msix_entries[i].entry); + } + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0477 MSI-X entry[%d]: vector=x%x " + "message=%d\n", i, + phba->msix_entries[i].vector, + phba->msix_entries[i].entry); /* * Assign MSI-X vectors to interrupt handlers */ /* vector-0 is associated to slow-path handler */ - rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, - IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); + rc = request_irq(phba->msix_entries[0].vector, + &lpfc_sli_sp_intr_handler, IRQF_SHARED, + LPFC_SP_DRIVER_HANDLER_NAME, phba); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0421 MSI-X slow-path request_irq failed " @@ -2371,8 +6068,9 @@ lpfc_enable_msix(struct lpfc_hba *phba) } /* vector-1 is associated to fast-path handler */ - rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, - IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); + rc = request_irq(phba->msix_entries[1].vector, + &lpfc_sli_fp_intr_handler, IRQF_SHARED, + LPFC_FP_DRIVER_HANDLER_NAME, phba); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -2401,7 +6099,7 @@ lpfc_enable_msix(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "0351 Config MSI mailbox command failed, " "mbxCmd x%x, mbxStatus x%x\n", - pmb->mb.mbxCommand, pmb->mb.mbxStatus); + pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); goto mbx_fail_out; } @@ -2428,14 +6126,14 @@ msi_fail_out: } /** - * lpfc_disable_msix - Disable MSI-X interrupt mode + * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. * @phba: pointer to lpfc hba data structure. * * This routine is invoked to release the MSI-X vectors and then disable the - * MSI-X interrupt mode. + * MSI-X interrupt mode to device with SLI-3 interface spec. **/ static void -lpfc_disable_msix(struct lpfc_hba *phba) +lpfc_sli_disable_msix(struct lpfc_hba *phba) { int i; @@ -2444,23 +6142,26 @@ lpfc_disable_msix(struct lpfc_hba *phba) free_irq(phba->msix_entries[i].vector, phba); /* Disable MSI-X */ pci_disable_msix(phba->pcidev); + + return; } /** - * lpfc_enable_msi - Enable MSI interrupt mode + * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device. * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to enable the MSI interrupt mode. The kernel - * function pci_enable_msi() is called to enable the MSI vector. 
The - * device driver is responsible for calling the request_irq() to register - * MSI vector with a interrupt the handler, which is done in this function. + * This routine is invoked to enable the MSI interrupt mode to device with + * SLI-3 interface spec. The kernel function pci_enable_msi() is called to + * enable the MSI vector. The device driver is responsible for calling the + * request_irq() to register MSI vector with a interrupt the handler, which + * is done in this function. * * Return codes * 0 - sucessful * other values - error */ static int -lpfc_enable_msi(struct lpfc_hba *phba) +lpfc_sli_enable_msi(struct lpfc_hba *phba) { int rc; @@ -2474,7 +6175,7 @@ lpfc_enable_msi(struct lpfc_hba *phba) return rc; } - rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, + rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (rc) { pci_disable_msi(phba->pcidev); @@ -2485,17 +6186,17 @@ lpfc_enable_msi(struct lpfc_hba *phba) } /** - * lpfc_disable_msi - Disable MSI interrupt mode + * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device. * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to disable the MSI interrupt mode. The driver - * calls free_irq() on MSI vector it has done request_irq() on before - * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and - * a device will be left with MSI enabled and leaks its vector. + * This routine is invoked to disable the MSI interrupt mode to device with + * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has + * done request_irq() on before calling pci_disable_msi(). Failure to do so + * results in a BUG_ON() and a device will be left with MSI enabled and leaks + * its vector. */ - static void -lpfc_disable_msi(struct lpfc_hba *phba) +lpfc_sli_disable_msi(struct lpfc_hba *phba) { free_irq(phba->pcidev->irq, phba); pci_disable_msi(phba->pcidev); @@ -2503,80 +6204,298 @@ lpfc_disable_msi(struct lpfc_hba *phba) } /** - * lpfc_log_intr_mode - Log the active interrupt mode + * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device. * @phba: pointer to lpfc hba data structure. - * @intr_mode: active interrupt mode adopted. * - * This routine it invoked to log the currently used active interrupt mode - * to the device. - */ + * This routine is invoked to enable device interrupt and associate driver's + * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface + * spec. Depends on the interrupt mode configured to the driver, the driver + * will try to fallback from the configured interrupt mode to an interrupt + * mode which is supported by the platform, kernel, and device in the order + * of: + * MSI-X -> MSI -> IRQ. 
+ * + * Return codes + * 0 - sucessful + * other values - error + **/ +static uint32_t +lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) +{ + uint32_t intr_mode = LPFC_INTR_ERROR; + int retval; + + if (cfg_mode == 2) { + /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ + retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3); + if (!retval) { + /* Now, try to enable MSI-X interrupt mode */ + retval = lpfc_sli_enable_msix(phba); + if (!retval) { + /* Indicate initialization to MSI-X mode */ + phba->intr_type = MSIX; + intr_mode = 2; + } + } + } + + /* Fallback to MSI if MSI-X initialization failed */ + if (cfg_mode >= 1 && phba->intr_type == NONE) { + retval = lpfc_sli_enable_msi(phba); + if (!retval) { + /* Indicate initialization to MSI mode */ + phba->intr_type = MSI; + intr_mode = 1; + } + } + + /* Fallback to INTx if both MSI-X/MSI initalization failed */ + if (phba->intr_type == NONE) { + retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (!retval) { + /* Indicate initialization to INTx mode */ + phba->intr_type = INTx; + intr_mode = 0; + } + } + return intr_mode; +} + +/** + * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to disable device interrupt and disassociate the + * driver's interrupt handler(s) from interrupt vector(s) to device with + * SLI-3 interface spec. Depending on the interrupt mode, the driver will + * release the interrupt vector(s) for the message signaled interrupt. + **/ static void -lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) +lpfc_sli_disable_intr(struct lpfc_hba *phba) { - switch (intr_mode) { - case 0: - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0470 Enable INTx interrupt mode.\n"); - break; - case 1: + /* Disable the currently initialized interrupt mode */ + if (phba->intr_type == MSIX) + lpfc_sli_disable_msix(phba); + else if (phba->intr_type == MSI) + lpfc_sli_disable_msi(phba); + else if (phba->intr_type == INTx) + free_irq(phba->pcidev->irq, phba); + + /* Reset interrupt management states */ + phba->intr_type = NONE; + phba->sli.slistat.sli_intr = 0; + + return; +} + +/** + * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI-X interrupt vectors to device + * with SLI-4 interface spec. The kernel function pci_enable_msix() is called + * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, + * enables either all or nothing, depending on the current availability of + * PCI vector resources. The device driver is responsible for calling the + * individual request_irq() to register each MSI-X vector with a interrupt + * handler, which is done in this function. Note that later when device is + * unloading, the driver should always call free_irq() on all MSI-X vectors + * it has done request_irq() on before calling pci_disable_msix(). Failure + * to do so results in a BUG_ON() and a device will be left with MSI-X + * enabled and leaks its vectors. 
+ * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli4_enable_msix(struct lpfc_hba *phba) +{ + int rc, index; + + /* Set up MSI-X multi-message vectors */ + for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) + phba->sli4_hba.msix_entries[index].entry = index; + + /* Configure MSI-X capability structure */ + rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, + phba->sli4_hba.cfg_eqn); + if (rc) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0481 Enabled MSI interrupt mode.\n"); - break; - case 2: + "0484 PCI enable MSI-X failed (%d)\n", rc); + goto msi_fail_out; + } + /* Log MSI-X vector assignment */ + for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0480 Enabled MSI-X interrupt mode.\n"); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0482 Illegal interrupt mode.\n"); - break; + "0489 MSI-X entry[%d]: vector=x%x " + "message=%d\n", index, + phba->sli4_hba.msix_entries[index].vector, + phba->sli4_hba.msix_entries[index].entry); + /* + * Assign MSI-X vectors to interrupt handlers + */ + + /* The first vector must associated to slow-path handler for MQ */ + rc = request_irq(phba->sli4_hba.msix_entries[0].vector, + &lpfc_sli4_sp_intr_handler, IRQF_SHARED, + LPFC_SP_DRIVER_HANDLER_NAME, phba); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0485 MSI-X slow-path request_irq failed " + "(%d)\n", rc); + goto msi_fail_out; } - return; + + /* The rest of the vector(s) are associated to fast-path handler(s) */ + for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { + phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; + phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; + rc = request_irq(phba->sli4_hba.msix_entries[index].vector, + &lpfc_sli4_fp_intr_handler, IRQF_SHARED, + LPFC_FP_DRIVER_HANDLER_NAME, + &phba->sli4_hba.fcp_eq_hdl[index - 1]); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0486 MSI-X fast-path (%d) " + "request_irq failed (%d)\n", index, rc); + goto cfg_fail_out; + } + } + + return rc; + +cfg_fail_out: + /* free the irq already requested */ + for (--index; index >= 1; index--) + free_irq(phba->sli4_hba.msix_entries[index - 1].vector, + &phba->sli4_hba.fcp_eq_hdl[index - 1]); + + /* free the irq already requested */ + free_irq(phba->sli4_hba.msix_entries[0].vector, phba); + +msi_fail_out: + /* Unconfigure MSI-X capability structure */ + pci_disable_msix(phba->pcidev); + return rc; } +/** + * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release the MSI-X vectors and then disable the + * MSI-X interrupt mode to device with SLI-4 interface spec. 
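A detail worth noting in lpfc_sli4_enable_msix() above: when the Nth request_irq() fails, the vectors already requested are released in reverse order before MSI-X is disabled, so vector 0 (the slow path) goes last. The same partial-acquisition cleanup, reduced to a standalone sketch (stub functions; the simulated failure on the third vector is arbitrary):

#include <stdio.h>

#define NVEC 4

static int request_vec(int i) { return i == 2 ? -1 : 0; } /* 3rd one fails */
static void free_vec(int i)   { printf("released vector %d\n", i); }

/*
 * Request NVEC vectors; if one fails, release the ones already requested
 * in reverse order so vector 0 (the slow path) is released last.
 */
static int enable_vectors(void)
{
	int i, rc = 0;

	for (i = 0; i < NVEC; i++) {
		rc = request_vec(i);
		if (rc)
			break;
	}
	if (rc)
		while (--i >= 0)
			free_vec(i);
	return rc;
}

int main(void)
{
	return enable_vectors() ? 1 : 0;
}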
+ **/ static void -lpfc_stop_port(struct lpfc_hba *phba) +lpfc_sli4_disable_msix(struct lpfc_hba *phba) { - /* Clear all interrupt enable conditions */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - /* Clear all pending interrupts */ - writel(0xffffffff, phba->HAregaddr); - readl(phba->HAregaddr); /* flush */ + int index; - /* Reset some HBA SLI setup states */ - lpfc_stop_phba_timers(phba); - phba->pport->work_port_events = 0; + /* Free up MSI-X multi-message vectors */ + free_irq(phba->sli4_hba.msix_entries[0].vector, phba); + + for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) + free_irq(phba->sli4_hba.msix_entries[index].vector, + &phba->sli4_hba.fcp_eq_hdl[index - 1]); + /* Disable MSI-X */ + pci_disable_msix(phba->pcidev); + + return; +} + +/** + * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to enable the MSI interrupt mode to device with + * SLI-4 interface spec. The kernel function pci_enable_msi() is called + * to enable the MSI vector. The device driver is responsible for calling + * the request_irq() to register MSI vector with a interrupt the handler, + * which is done in this function. + * + * Return codes + * 0 - sucessful + * other values - error + **/ +static int +lpfc_sli4_enable_msi(struct lpfc_hba *phba) +{ + int rc, index; + + rc = pci_enable_msi(phba->pcidev); + if (!rc) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0487 PCI enable MSI mode success.\n"); + else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0488 PCI enable MSI mode failed (%d)\n", rc); + return rc; + } + + rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (rc) { + pci_disable_msi(phba->pcidev); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0490 MSI request_irq failed (%d)\n", rc); + } + + for (index = 0; index < phba->cfg_fcp_eq_count; index++) { + phba->sli4_hba.fcp_eq_hdl[index].idx = index; + phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + } + + return rc; +} +/** + * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to disable the MSI interrupt mode to device with + * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has + * done request_irq() on before calling pci_disable_msi(). Failure to do so + * results in a BUG_ON() and a device will be left with MSI enabled and leaks + * its vector. + **/ +static void +lpfc_sli4_disable_msi(struct lpfc_hba *phba) +{ + free_irq(phba->pcidev->irq, phba); + pci_disable_msi(phba->pcidev); return; } /** - * lpfc_enable_intr - Enable device interrupt + * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable device interrupt and associate driver's - * interrupt handler(s) to interrupt vector(s). Depends on the interrupt - * mode configured to the driver, the driver will try to fallback from the - * configured interrupt mode to an interrupt mode which is supported by the - * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ. + * interrupt handler(s) to interrupt vector(s) to device with SLI-4 + * interface spec. 
Depends on the interrupt mode configured to the driver, + * the driver will try to fallback from the configured interrupt mode to an + * interrupt mode which is supported by the platform, kernel, and device in + * the order of: + * MSI-X -> MSI -> IRQ. * * Return codes - * 0 - sucessful - * other values - error + * 0 - sucessful + * other values - error **/ static uint32_t -lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) +lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) { uint32_t intr_mode = LPFC_INTR_ERROR; - int retval; + int retval, index; if (cfg_mode == 2) { - /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ - retval = lpfc_sli_config_port(phba, 3); + /* Preparation before conf_msi mbox cmd */ + retval = 0; if (!retval) { /* Now, try to enable MSI-X interrupt mode */ - retval = lpfc_enable_msix(phba); + retval = lpfc_sli4_enable_msix(phba); if (!retval) { /* Indicate initialization to MSI-X mode */ phba->intr_type = MSIX; @@ -2587,7 +6506,7 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) /* Fallback to MSI if MSI-X initialization failed */ if (cfg_mode >= 1 && phba->intr_type == NONE) { - retval = lpfc_enable_msi(phba); + retval = lpfc_sli4_enable_msi(phba); if (!retval) { /* Indicate initialization to MSI mode */ phba->intr_type = MSI; @@ -2597,34 +6516,39 @@ lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) /* Fallback to INTx if both MSI-X/MSI initalization failed */ if (phba->intr_type == NONE) { - retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, + retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (!retval) { /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; + for (index = 0; index < phba->cfg_fcp_eq_count; + index++) { + phba->sli4_hba.fcp_eq_hdl[index].idx = index; + phba->sli4_hba.fcp_eq_hdl[index].phba = phba; + } } } return intr_mode; } /** - * lpfc_disable_intr - Disable device interrupt + * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to disable device interrupt and disassociate the - * driver's interrupt handler(s) from interrupt vector(s). Depending on the - * interrupt mode, the driver will release the interrupt vector(s) for the - * message signaled interrupt. + * This routine is invoked to disable device interrupt and disassociate + * the driver's interrupt handler(s) from interrupt vector(s) to device + * with SLI-4 interface spec. Depending on the interrupt mode, the driver + * will release the interrupt vector(s) for the message signaled interrupt. **/ static void -lpfc_disable_intr(struct lpfc_hba *phba) +lpfc_sli4_disable_intr(struct lpfc_hba *phba) { /* Disable the currently initialized interrupt mode */ if (phba->intr_type == MSIX) - lpfc_disable_msix(phba); + lpfc_sli4_disable_msix(phba); else if (phba->intr_type == MSI) - lpfc_disable_msi(phba); + lpfc_sli4_disable_msi(phba); else if (phba->intr_type == INTx) free_irq(phba->pcidev->irq, phba); @@ -2636,263 +6560,233 @@ lpfc_disable_intr(struct lpfc_hba *phba) } /** - * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem - * @pdev: pointer to PCI device - * @pid: pointer to PCI device identifier - * - * This routine is to be registered to the kernel's PCI subsystem. 
When an - * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at - * PCI device-specific information of the device and driver to see if the - * driver state that it can support this kind of device. If the match is - * successful, the driver core invokes this routine. If this routine - * determines it can claim the HBA, it does all the initialization that it - * needs to do to handle the HBA properly. + * lpfc_unset_hba - Unset SLI3 hba device initialization + * @phba: pointer to lpfc hba data structure. * - * Return code - * 0 - driver can claim the device - * negative value - driver can not claim the device + * This routine is invoked to unset the HBA device initialization steps to + * a device with SLI-3 interface spec. **/ -static int __devinit -lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) +static void +lpfc_unset_hba(struct lpfc_hba *phba) { - struct lpfc_vport *vport = NULL; - struct lpfc_hba *phba; - struct lpfc_sli *psli; - struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; - struct Scsi_Host *shost = NULL; - void *ptr; - unsigned long bar0map_len, bar2map_len; - int error = -ENODEV, retval; - int i, hbq_count; - uint16_t iotag; - uint32_t cfg_mode, intr_mode; - int bars = pci_select_bars(pdev, IORESOURCE_MEM); - struct lpfc_adapter_event_header adapter_event; - - if (pci_enable_device_mem(pdev)) - goto out; - if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) - goto out_disable_device; + struct lpfc_vport *vport = phba->pport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); - if (!phba) - goto out_release_regions; + spin_lock_irq(shost->host_lock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(shost->host_lock); - atomic_set(&phba->fast_event_count, 0); - spin_lock_init(&phba->hbalock); + lpfc_stop_hba_timers(phba); - /* Initialize ndlp management spinlock */ - spin_lock_init(&phba->ndlp_lock); + phba->pport->work_port_events = 0; - phba->pcidev = pdev; + lpfc_sli_hba_down(phba); - /* Assign an unused board number */ - if ((phba->brd_no = lpfc_get_instance()) < 0) - goto out_free_phba; + lpfc_sli_brdrestart(phba); - INIT_LIST_HEAD(&phba->port_list); - init_waitqueue_head(&phba->wait_4_mlo_m_q); - /* - * Get all the module params for configuring this host and then - * establish the host. - */ - lpfc_get_cfgparam(phba); - phba->max_vpi = LPFC_MAX_VPI; + lpfc_sli_disable_intr(phba); - /* Initialize timers used by driver */ - init_timer(&phba->hb_tmofunc); - phba->hb_tmofunc.function = lpfc_hb_timeout; - phba->hb_tmofunc.data = (unsigned long)phba; + return; +} - psli = &phba->sli; - init_timer(&psli->mbox_tmo); - psli->mbox_tmo.function = lpfc_mbox_timeout; - psli->mbox_tmo.data = (unsigned long) phba; - init_timer(&phba->fcp_poll_timer); - phba->fcp_poll_timer.function = lpfc_poll_timeout; - phba->fcp_poll_timer.data = (unsigned long) phba; - init_timer(&phba->fabric_block_timer); - phba->fabric_block_timer.function = lpfc_fabric_block_timeout; - phba->fabric_block_timer.data = (unsigned long) phba; - init_timer(&phba->eratt_poll); - phba->eratt_poll.function = lpfc_poll_eratt; - phba->eratt_poll.data = (unsigned long) phba; +/** + * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to unset the HBA device initialization steps to + * a device with SLI-4 interface spec. 
+ **/ +static void +lpfc_sli4_unset_hba(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport = phba->pport; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - pci_set_master(pdev); - pci_save_state(pdev); - pci_try_set_mwi(pdev); + spin_lock_irq(shost->host_lock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(shost->host_lock); - if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) - if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0) - goto out_idr_remove; + phba->pport->work_port_events = 0; - /* - * Get the bus address of Bar0 and Bar2 and the number of bytes - * required by each mapping. - */ - phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); - bar0map_len = pci_resource_len(phba->pcidev, 0); + lpfc_sli4_hba_down(phba); - phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); - bar2map_len = pci_resource_len(phba->pcidev, 2); + lpfc_sli4_disable_intr(phba); - /* Map HBA SLIM to a kernel virtual address. */ - phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); - if (!phba->slim_memmap_p) { - error = -ENODEV; - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLIM memory.\n"); - goto out_idr_remove; - } - - /* Map HBA Control Registers to a kernel virtual address. */ - phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); - if (!phba->ctrl_regs_memmap_p) { - error = -ENODEV; - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for HBA control registers.\n"); - goto out_iounmap_slim; - } + return; +} - /* Allocate memory for SLI-2 structures */ - phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, - SLI2_SLIM_SIZE, - &phba->slim2p.phys, - GFP_KERNEL); - if (!phba->slim2p.virt) - goto out_iounmap; +/** + * lpfc_sli4_hba_unset - Unset the fcoe hba + * @phba: Pointer to HBA context object. + * + * This function is called in the SLI4 code path to reset the HBA's FCoE + * function. The caller is not required to hold any lock. This routine + * issues PCI function reset mailbox command to reset the FCoE function. + * At the end of the function, it calls lpfc_hba_down_post function to + * free any pending commands. + **/ +static void +lpfc_sli4_hba_unset(struct lpfc_hba *phba) +{ + int wait_cnt = 0; + LPFC_MBOXQ_t *mboxq; - memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); - phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); - phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); - phba->IOCBs = (phba->slim2p.virt + - offsetof(struct lpfc_sli2_slim, IOCBs)); + lpfc_stop_hba_timers(phba); + phba->sli4_hba.intr_enable = 0; - phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, - lpfc_sli_hbq_size(), - &phba->hbqslimp.phys, - GFP_KERNEL); - if (!phba->hbqslimp.virt) - goto out_free_slim; + /* + * Gracefully wait out the potential current outstanding asynchronous + * mailbox command. 
+ */ - hbq_count = lpfc_sli_hbq_count(); - ptr = phba->hbqslimp.virt; - for (i = 0; i < hbq_count; ++i) { - phba->hbqs[i].hbq_virt = ptr; - INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); - ptr += (lpfc_hbq_defs[i]->entry_count * - sizeof(struct lpfc_hbq_entry)); + /* First, block any pending async mailbox command from posted */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + /* Now, trying to wait it out if we can */ + while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { + msleep(10); + if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) + break; } - phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; - phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; - - memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); - - INIT_LIST_HEAD(&phba->hbqbuf_in_list); - - /* Initialize the SLI Layer to run with lpfc HBAs. */ - lpfc_sli_setup(phba); - lpfc_sli_queue_setup(phba); - - retval = lpfc_mem_alloc(phba); - if (retval) { - error = retval; - goto out_free_hbqslimp; + /* Forcefully release the outstanding mailbox command if timed out */ + if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_lock_irq(&phba->hbalock); + mboxq = phba->sli.mbox_active; + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + spin_unlock_irq(&phba->hbalock); } - /* Initialize and populate the iocb list per host. */ - INIT_LIST_HEAD(&phba->lpfc_iocb_list); - for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { - iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); - if (iocbq_entry == NULL) { - printk(KERN_ERR "%s: only allocated %d iocbs of " - "expected %d count. Unloading driver.\n", - __func__, i, LPFC_IOCB_LIST_CNT); - error = -ENOMEM; - goto out_free_iocbq; - } + /* Tear down the queues in the HBA */ + lpfc_sli4_queue_unset(phba); - iotag = lpfc_sli_next_iotag(phba, iocbq_entry); - if (iotag == 0) { - kfree (iocbq_entry); - printk(KERN_ERR "%s: failed to allocate IOTAG. " - "Unloading driver.\n", - __func__); - error = -ENOMEM; - goto out_free_iocbq; - } + /* Disable PCI subsystem interrupt */ + lpfc_sli4_disable_intr(phba); - spin_lock_irq(&phba->hbalock); - list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); - phba->total_iocbq_bufs++; - spin_unlock_irq(&phba->hbalock); - } + /* Stop kthread signal shall trigger work_done one more time */ + kthread_stop(phba->worker_thread); - /* Initialize HBA structure */ - phba->fc_edtov = FF_DEF_EDTOV; - phba->fc_ratov = FF_DEF_RATOV; - phba->fc_altov = FF_DEF_ALTOV; - phba->fc_arbtov = FF_DEF_ARBTOV; + /* Stop the SLI4 device port */ + phba->pport->work_port_events = 0; +} - INIT_LIST_HEAD(&phba->work_list); - phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); - phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); +/** + * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier + * + * This routine is to be called to attach a device with SLI-3 interface spec + * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is + * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific + * information of the device and driver to see if the driver state that it can + * support this kind of device. If the match is successful, the driver core + * invokes this routine. 
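The wait-out logic in lpfc_sli4_hba_unset() above is a bounded quiesce: block new submissions, poll a busy flag with a short sleep, and give up after a fixed count so the caller can force-complete the straggler. A self-contained sketch of the same bounded wait (the 10ms poll interval mirrors the loop above; the iteration cap and everything else are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define WAIT_MAX_CNT 100		/* cap is illustrative */

static volatile bool mbox_active = true;

/* Poll a busy flag every 10ms; give up after WAIT_MAX_CNT polls. */
static bool wait_for_idle(void)
{
	int wait_cnt = 0;

	while (mbox_active) {
		usleep(10 * 1000);
		if (++wait_cnt > WAIT_MAX_CNT)
			return false;	/* caller force-completes it */
	}
	return true;
}

int main(void)
{
	printf(wait_for_idle() ? "went idle\n" : "timed out; reclaiming\n");
	return 0;
}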
If this routine determines it can claim the HBA, it + * does all the initialization that it needs to do to handle the HBA properly. + * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int __devinit +lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct lpfc_hba *phba; + struct lpfc_vport *vport = NULL; + int error; + uint32_t cfg_mode, intr_mode; - /* Initialize the wait queue head for the kernel thread */ - init_waitqueue_head(&phba->work_waitq); + /* Allocate memory for HBA structure */ + phba = lpfc_hba_alloc(pdev); + if (!phba) + return -ENOMEM; - /* Startup the kernel thread for this host adapter. */ - phba->worker_thread = kthread_run(lpfc_do_work, phba, - "lpfc_worker_%d", phba->brd_no); - if (IS_ERR(phba->worker_thread)) { - error = PTR_ERR(phba->worker_thread); - goto out_free_iocbq; + /* Perform generic PCI device enabling operation */ + error = lpfc_enable_pci_dev(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1401 Failed to enable pci device.\n"); + goto out_free_phba; } - /* Initialize the list of scsi buffers used by driver for scsi IO. */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + /* Set up SLI API function jump table for PCI-device group-0 HBAs */ + error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); + if (error) + goto out_disable_pci_dev; - /* Initialize list of fabric iocbs */ - INIT_LIST_HEAD(&phba->fabric_iocb_list); + /* Set up SLI-3 specific device PCI memory space */ + error = lpfc_sli_pci_mem_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1402 Failed to set up pci memory space.\n"); + goto out_disable_pci_dev; + } - /* Initialize list to save ELS buffers */ - INIT_LIST_HEAD(&phba->elsbuf); + /* Set up phase-1 common device driver resources */ + error = lpfc_setup_driver_resource_phase1(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1403 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s3; + } - vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); - if (!vport) - goto out_kthread_stop; + /* Set up SLI-3 specific device driver resources */ + error = lpfc_sli_driver_resource_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1404 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s3; + } - shost = lpfc_shost_from_vport(vport); - phba->pport = vport; - lpfc_debugfs_initialize(vport); + /* Initialize and populate the iocb list per host */ + error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1405 Failed to initialize iocb list.\n"); + goto out_unset_driver_resource_s3; + } - pci_set_drvdata(pdev, shost); + /* Set up common device driver resources */ + error = lpfc_setup_driver_resource_phase2(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1406 Failed to set up driver resource.\n"); + goto out_free_iocb_list; + } - phba->MBslimaddr = phba->slim_memmap_p; - phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; - phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; - phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; - phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; + /* Create SCSI host to the physical port */ + error = lpfc_create_shost(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1407 Failed to create scsi host.\n"); + goto 
out_unset_driver_resource; + } /* Configure sysfs attributes */ - if (lpfc_alloc_sysfs_attr(vport)) { + vport = phba->pport; + error = lpfc_alloc_sysfs_attr(vport); + if (error) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1476 Failed to allocate sysfs attr\n"); - error = -ENOMEM; - goto out_destroy_port; + goto out_destroy_shost; } + /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; while (true) { + /* Put device to a known state before enabling interrupt */ + lpfc_stop_port(phba); /* Configure and enable interrupt */ - intr_mode = lpfc_enable_intr(phba, cfg_mode); + intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0426 Failed to enable interrupt.\n"); + "0431 Failed to enable interrupt.\n"); + error = -ENODEV; goto out_free_sysfs_attr; } - /* HBA SLI setup */ + /* SLI-3 HBA setup */ if (lpfc_sli_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "1477 Failed to set up hba\n"); @@ -2902,185 +6796,65 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) /* Wait 50ms for the interrupts of previous mailbox commands */ msleep(50); - /* Check active interrupts received */ - if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { + /* Check active interrupts on message signaled interrupts */ + if (intr_mode == 0 || + phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { /* Log the current active interrupt mode */ phba->intr_mode = intr_mode; lpfc_log_intr_mode(phba, intr_mode); break; } else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0451 Configure interrupt mode (%d) " + "0447 Configure interrupt mode (%d) " "failed active interrupt test.\n", intr_mode); - if (intr_mode == 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0479 Failed to enable " - "interrupt.\n"); - error = -ENODEV; - goto out_remove_device; - } - /* Stop HBA SLI setups */ - lpfc_stop_port(phba); /* Disable the current interrupt mode */ - lpfc_disable_intr(phba); + lpfc_sli_disable_intr(phba); /* Try next level of interrupt mode */ cfg_mode = --intr_mode; } } - /* - * hba setup may have changed the hba_queue_depth so we need to adjust - * the value of can_queue. 
- */ - shost->can_queue = phba->cfg_hba_queue_depth - 10; - if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { - - if (lpfc_prot_mask && lpfc_prot_guard) { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "1478 Registering BlockGuard with the " - "SCSI layer\n"); + /* Perform post initialization setup */ + lpfc_post_init_setup(phba); - scsi_host_set_prot(shost, lpfc_prot_mask); - scsi_host_set_guard(shost, lpfc_prot_guard); - } - } - - if (!_dump_buf_data) { - int pagecnt = 10; - while (pagecnt) { - spin_lock_init(&_dump_buf_lock); - _dump_buf_data = - (char *) __get_free_pages(GFP_KERNEL, pagecnt); - if (_dump_buf_data) { - printk(KERN_ERR "BLKGRD allocated %d pages for " - "_dump_buf_data at 0x%p\n", - (1 << pagecnt), _dump_buf_data); - _dump_buf_data_order = pagecnt; - memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT) - << pagecnt)); - break; - } else { - --pagecnt; - } - - } - - if (!_dump_buf_data_order) - printk(KERN_ERR "BLKGRD ERROR unable to allocate " - "memory for hexdump\n"); - - } else { - printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" - "\n", _dump_buf_data); - } - - - if (!_dump_buf_dif) { - int pagecnt = 10; - while (pagecnt) { - _dump_buf_dif = - (char *) __get_free_pages(GFP_KERNEL, pagecnt); - if (_dump_buf_dif) { - printk(KERN_ERR "BLKGRD allocated %d pages for " - "_dump_buf_dif at 0x%p\n", - (1 << pagecnt), _dump_buf_dif); - _dump_buf_dif_order = pagecnt; - memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT) - << pagecnt)); - break; - } else { - --pagecnt; - } - - } - - if (!_dump_buf_dif_order) - printk(KERN_ERR "BLKGRD ERROR unable to allocate " - "memory for hexdump\n"); - - } else { - printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", - _dump_buf_dif); - } - - lpfc_host_attrib_init(shost); - - if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - spin_lock_irq(shost->host_lock); - lpfc_poll_start_timer(phba); - spin_unlock_irq(shost->host_lock); - } - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0428 Perform SCSI scan\n"); - /* Send board arrival event to upper layer */ - adapter_event.event_type = FC_REG_ADAPTER_EVENT; - adapter_event.subcategory = LPFC_EVENT_ARRIVAL; - fc_host_post_vendor_event(shost, fc_get_event_number(), - sizeof(adapter_event), - (char *) &adapter_event, - LPFC_NL_VENDOR_ID); + /* Check if there are static vports to be created. 
*/
+	lpfc_create_static_vport(phba);

 	return 0;

 out_remove_device:
-	spin_lock_irq(shost->host_lock);
-	vport->load_flag |= FC_UNLOADING;
-	spin_unlock_irq(shost->host_lock);
-	lpfc_stop_phba_timers(phba);
-	phba->pport->work_port_events = 0;
-	lpfc_disable_intr(phba);
-	lpfc_sli_hba_down(phba);
-	lpfc_sli_brdrestart(phba);
+	lpfc_unset_hba(phba);
 out_free_sysfs_attr:
 	lpfc_free_sysfs_attr(vport);
-out_destroy_port:
-	destroy_port(vport);
-out_kthread_stop:
-	kthread_stop(phba->worker_thread);
-out_free_iocbq:
-	list_for_each_entry_safe(iocbq_entry, iocbq_next,
-		&phba->lpfc_iocb_list, list) {
-		kfree(iocbq_entry);
-		phba->total_iocbq_bufs--;
-	}
-	lpfc_mem_free(phba);
-out_free_hbqslimp:
-	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
-			  phba->hbqslimp.virt, phba->hbqslimp.phys);
-out_free_slim:
-	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-			  phba->slim2p.virt, phba->slim2p.phys);
-out_iounmap:
-	iounmap(phba->ctrl_regs_memmap_p);
-out_iounmap_slim:
-	iounmap(phba->slim_memmap_p);
-out_idr_remove:
-	idr_remove(&lpfc_hba_index, phba->brd_no);
+out_destroy_shost:
+	lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+	lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+	lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+	lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+	lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+	lpfc_disable_pci_dev(phba);
 out_free_phba:
-	kfree(phba);
-out_release_regions:
-	pci_release_selected_regions(pdev, bars);
-out_disable_device:
-	pci_disable_device(pdev);
-out:
-	pci_set_drvdata(pdev, NULL);
-	if (shost)
-		scsi_host_put(shost);
+	lpfc_hba_free(phba);
 	return error;
 }

 /**
- * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
  * @pdev: pointer to PCI device
  *
- * This routine is to be registered to the kernel's PCI subsystem. When an
- * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
- * for the HBA device to be removed from the PCI subsystem properly.
+ * This routine is to be called to detach a device with SLI-3 interface
+ * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
+ * is removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
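The removed _dump_buf_data/_dump_buf_dif code above used a classic best-effort allocator: ask for the largest power-of-two buffer first and walk the order down until an allocation succeeds. The same strategy as a tiny userspace sketch, with malloc() in place of __get_free_pages() (the page size and orders here are illustrative):

#include <stdio.h>
#include <stdlib.h>

/*
 * Try the largest power-of-two buffer first and walk the order down,
 * as the removed dump-buffer code did with __get_free_pages().
 */
static void *alloc_largest(int max_order, int *got_order)
{
	int order;

	for (order = max_order; order > 0; order--) {
		void *buf = malloc((size_t)4096 << order);
		if (buf) {
			*got_order = order;
			return buf;
		}
	}
	*got_order = -1;
	return NULL;
}

int main(void)
{
	int order;
	void *buf = alloc_largest(10, &order);

	if (buf)
		printf("got order-%d buffer (%zu bytes)\n",
		       order, (size_t)4096 << order);
	free(buf);
	return 0;
}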
**/ static void __devexit -lpfc_pci_remove_one(struct pci_dev *pdev) +lpfc_pci_remove_one_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; @@ -3098,7 +6872,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) /* Release all the vports against this physical port */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) + for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) fc_vport_terminate(vports[i]->fc_vport); lpfc_destroy_vport_work_array(phba, vports); @@ -3120,7 +6894,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) /* Final cleanup of txcmplq and reset the HBA */ lpfc_sli_brdrestart(phba); - lpfc_stop_phba_timers(phba); + lpfc_stop_hba_timers(phba); spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); @@ -3128,7 +6902,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) lpfc_debugfs_terminate(vport); /* Disable interrupt */ - lpfc_disable_intr(phba); + lpfc_sli_disable_intr(phba); pci_set_drvdata(pdev, NULL); scsi_host_put(shost); @@ -3138,7 +6912,7 @@ lpfc_pci_remove_one(struct pci_dev *pdev) * corresponding pools here. */ lpfc_scsi_free(phba); - lpfc_mem_free(phba); + lpfc_mem_free_all(phba); dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), phba->hbqslimp.virt, phba->hbqslimp.phys); @@ -3151,36 +6925,35 @@ lpfc_pci_remove_one(struct pci_dev *pdev) iounmap(phba->ctrl_regs_memmap_p); iounmap(phba->slim_memmap_p); - idr_remove(&lpfc_hba_index, phba->brd_no); - - kfree(phba); + lpfc_hba_free(phba); pci_release_selected_regions(pdev, bars); pci_disable_device(pdev); } /** - * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management + * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt * @pdev: pointer to PCI device * @msg: power management message * - * This routine is to be registered to the kernel's PCI subsystem to support - * system Power Management (PM). When PM invokes this method, it quiesces the - * device by stopping the driver's worker thread for the device, turning off - * device's interrupt and DMA, and bring the device offline. Note that as the - * driver implements the minimum PM requirements to a power-aware driver's PM - * support for suspend/resume -- all the possible PM messages (SUSPEND, - * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND - * and the driver will fully reinitialize its device during resume() method - * call, the driver will set device to PCI_D3hot state in PCI config space - * instead of setting it according to the @msg provided by the PM. + * This routine is to be called from the kernel's PCI subsystem to support + * system Power Management (PM) to device with SLI-3 interface spec. When + * PM invokes this method, it quiesces the device by stopping the driver's + * worker thread for the device, turning off device's interrupt and DMA, + * and bring the device offline. Note that as the driver implements the + * minimum PM requirements to a power-aware driver's PM support for the + * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) + * to the suspend() method call will be treated as SUSPEND and the driver will + * fully reinitialize its device during resume() method call, the driver will + * set device to PCI_D3hot state in PCI config space instead of setting it + * according to the @msg provided by the PM. 
* * Return code - * 0 - driver suspended the device - * Error otherwise + * 0 - driver suspended the device + * Error otherwise **/ static int -lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) +lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3194,7 +6967,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) kthread_stop(phba->worker_thread); /* Disable interrupt from device */ - lpfc_disable_intr(phba); + lpfc_sli_disable_intr(phba); /* Save device state to PCI config space */ pci_save_state(pdev); @@ -3204,25 +6977,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) } /** - * lpfc_pci_resume_one - lpfc PCI func to resume device for power management + * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt * @pdev: pointer to PCI device * - * This routine is to be registered to the kernel's PCI subsystem to support - * system Power Management (PM). When PM invokes this method, it restores - * the device's PCI config space state and fully reinitializes the device - * and brings it online. Note that as the driver implements the minimum PM - * requirements to a power-aware driver's PM for suspend/resume -- all - * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() - * method call will be treated as SUSPEND and the driver will fully - * reinitialize its device during resume() method call, the device will be - * set to PCI_D0 directly in PCI config space before restoring the state. + * This routine is to be called from the kernel's PCI subsystem to support + * system Power Management (PM) to device with SLI-3 interface spec. When PM + * invokes this method, it restores the device's PCI config space state and + * fully reinitializes the device and brings it online. Note that as the + * driver implements the minimum PM requirements to a power-aware driver's + * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, + * FREEZE) to the suspend() method call will be treated as SUSPEND and the + * driver will fully reinitialize its device during resume() method call, + * the device will be set to PCI_D0 directly in PCI config space before + * restoring the state. * * Return code - * 0 - driver suspended the device - * Error otherwise + * 0 - driver suspended the device + * Error otherwise **/ static int -lpfc_pci_resume_one(struct pci_dev *pdev) +lpfc_pci_resume_one_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3250,7 +7024,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev) } /* Configure and enable interrupt */ - intr_mode = lpfc_enable_intr(phba, phba->intr_mode); + intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0430 PM resume Failed to enable interrupt\n"); @@ -3269,23 +7043,24 @@ lpfc_pci_resume_one(struct pci_dev *pdev) } /** - * lpfc_io_error_detected - Driver method for handling PCI I/O error detected + * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error * @pdev: pointer to PCI device. * @state: the current PCI connection state. * - * This routine is registered to the PCI subsystem for error handling. This - * function is called by the PCI subsystem after a PCI bus error affecting - * this device has been detected. 
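The .error_detected/.slot_reset/.resume methods described here form a small state machine driven by the PCI core: stop traffic and ask for a reset, re-initialize after the reset, then resume I/O only once recovery has succeeded. A toy model of that contract in plain C (the enum stands in for pci_ers_result_t; all names and bodies are illustrative):

#include <stdio.h>

/* Illustrative stand-in for pci_ers_result_t. */
enum ers_result { ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT };

static enum ers_result error_detected(void)
{
	/* stop I/O and interrupts, then ask the core for a reset */
	return ERS_NEED_RESET;
}

static enum ers_result slot_reset(void)
{
	/* re-initialize the device; stay offline, no traffic yet */
	return ERS_RECOVERED;
}

static void resume(void)
{
	/* bring the port online; traffic may flow again */
	puts("device resumed");
}

int main(void)
{
	if (error_detected() == ERS_NEED_RESET &&
	    slot_reset() == ERS_RECOVERED)
		resume();
	return 0;
}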
When this function is invoked, it will - * need to stop all the I/Os and interrupt(s) to the device. Once that is - * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to - * perform proper recovery as desired. + * This routine is called from the PCI subsystem for I/O error handling to + * device with SLI-3 interface spec. This function is called by the PCI + * subsystem after a PCI bus error affecting this device has been detected. + * When this function is invoked, it will need to stop all the I/Os and + * interrupt(s) to the device. Once that is done, it will return + * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery + * as desired. * * Return codes - * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ -static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, - pci_channel_state_t state) +static pci_ers_result_t +lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3312,30 +7087,32 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, lpfc_sli_abort_iocb_ring(phba, pring); /* Disable interrupt */ - lpfc_disable_intr(phba); + lpfc_sli_disable_intr(phba); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; } /** - * lpfc_io_slot_reset - Restart a PCI device from scratch + * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. * @pdev: pointer to PCI device. * - * This routine is registered to the PCI subsystem for error handling. This is - * called after PCI bus has been reset to restart the PCI card from scratch, - * as if from a cold-boot. During the PCI subsystem error recovery, after the - * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform - * proper error recovery and then call this routine before calling the .resume - * method to recover the device. This function will initialize the HBA device, - * enable the interrupt, but it will just put the HBA to offline state without - * passing any I/O traffic. + * This routine is called from the PCI subsystem for error handling to + * device with SLI-3 interface spec. This is called after PCI bus has been + * reset to restart the PCI card from scratch, as if from a cold-boot. + * During the PCI subsystem error recovery, after driver returns + * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error + * recovery and then call this routine before calling the .resume method + * to recover the device. This function will initialize the HBA device, + * enable the interrupt, but it will just put the HBA to offline state + * without passing any I/O traffic. 
* * Return codes - * PCI_ERS_RESULT_RECOVERED - the device has been recovered - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered */ -static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) +static pci_ers_result_t +lpfc_io_slot_reset_s3(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -3354,11 +7131,11 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) pci_set_master(pdev); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); /* Configure and enable interrupt */ - intr_mode = lpfc_enable_intr(phba, phba->intr_mode); + intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0427 Cannot re-enable interrupt after " @@ -3378,20 +7155,713 @@ static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) } /** - * lpfc_io_resume - Resume PCI I/O operation + * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. * @pdev: pointer to PCI device * - * This routine is registered to the PCI subsystem for error handling. It is - * called when kernel error recovery tells the lpfc driver that it is ok to - * resume normal PCI operation after PCI bus error recovery. After this call, - * traffic can start to flow from this device again. + * This routine is called from the PCI subsystem for error handling to device + * with SLI-3 interface spec. It is called when kernel error recovery tells + * the lpfc driver that it is ok to resume normal PCI operation after PCI bus + * error recovery. After this call, traffic can start to flow from this device + * again. */ -static void lpfc_io_resume(struct pci_dev *pdev) +static void +lpfc_io_resume_s3(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + lpfc_online(phba); +} + +/** + * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve + * @phba: pointer to lpfc hba data structure. + * + * returns the number of ELS/CT IOCBs to reserve + **/ +int +lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) +{ + int max_xri = phba->sli4_hba.max_cfg_param.max_xri; + + if (max_xri <= 100) + return 4; + else if (max_xri <= 256) + return 8; + else if (max_xri <= 512) + return 16; + else if (max_xri <= 1024) + return 32; + else + return 48; +} + +/** + * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier + * + * This routine is called from the kernel's PCI subsystem to device with + * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is + * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific + * information of the device and driver to see if the driver state that it + * can support this kind of device. If the match is successful, the driver + * core invokes this routine. If this routine determines it can claim the HBA, + * it does all the initialization that it needs to do to handle the HBA + * properly. 
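lpfc_sli4_get_els_iocb_cnt() above is a pure step function from the XRI budget to the ELS/CT IOCB reservation. Restated as a standalone function with the same thresholds, plus a few sanity checks (the thresholds are taken from the code above; the test values are arbitrary):

#include <assert.h>

/* Same stepping as lpfc_sli4_get_els_iocb_cnt() above. */
static int els_iocb_cnt(int max_xri)
{
	if (max_xri <= 100)
		return 4;
	if (max_xri <= 256)
		return 8;
	if (max_xri <= 512)
		return 16;
	if (max_xri <= 1024)
		return 32;
	return 48;
}

int main(void)
{
	assert(els_iocb_cnt(100) == 4);
	assert(els_iocb_cnt(257) == 16);
	assert(els_iocb_cnt(4096) == 48);
	return 0;
}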
+ * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int __devinit +lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + struct lpfc_hba *phba; + struct lpfc_vport *vport = NULL; + int error; + uint32_t cfg_mode, intr_mode; + int mcnt; + + /* Allocate memory for HBA structure */ + phba = lpfc_hba_alloc(pdev); + if (!phba) + return -ENOMEM; + + /* Perform generic PCI device enabling operation */ + error = lpfc_enable_pci_dev(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1409 Failed to enable pci device.\n"); + goto out_free_phba; + } + + /* Set up SLI API function jump table for PCI-device group-1 HBAs */ + error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); + if (error) + goto out_disable_pci_dev; + + /* Set up SLI-4 specific device PCI memory space */ + error = lpfc_sli4_pci_mem_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1410 Failed to set up pci memory space.\n"); + goto out_disable_pci_dev; + } + + /* Set up phase-1 common device driver resources */ + error = lpfc_setup_driver_resource_phase1(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1411 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s4; + } + + /* Set up SLI-4 Specific device driver resources */ + error = lpfc_sli4_driver_resource_setup(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1412 Failed to set up driver resource.\n"); + goto out_unset_pci_mem_s4; + } + + /* Initialize and populate the iocb list per host */ + error = lpfc_init_iocb_list(phba, + phba->sli4_hba.max_cfg_param.max_xri); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1413 Failed to initialize iocb list.\n"); + goto out_unset_driver_resource_s4; + } + + /* Set up common device driver resources */ + error = lpfc_setup_driver_resource_phase2(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1414 Failed to set up driver resource.\n"); + goto out_free_iocb_list; + } + + /* Create SCSI host to the physical port */ + error = lpfc_create_shost(phba); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1415 Failed to create scsi host.\n"); + goto out_unset_driver_resource; + } + + /* Configure sysfs attributes */ + vport = phba->pport; + error = lpfc_alloc_sysfs_attr(vport); + if (error) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1416 Failed to allocate sysfs attr\n"); + goto out_destroy_shost; + } + + /* Now, trying to enable interrupt and bring up the device */ + cfg_mode = phba->cfg_use_msi; + while (true) { + /* Put device to a known state before enabling interrupt */ + lpfc_stop_port(phba); + /* Configure and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0426 Failed to enable interrupt.\n"); + error = -ENODEV; + goto out_free_sysfs_attr; + } + /* Set up SLI-4 HBA */ + if (lpfc_sli4_hba_setup(phba)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1421 Failed to set up hba\n"); + error = -ENODEV; + goto out_disable_intr; + } + + /* Send NOP mbx cmds for non-INTx mode active interrupt test */ + if (intr_mode != 0) + mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, + LPFC_ACT_INTR_CNT); + + /* Check active interrupts received only for MSI/MSI-X */ + if (intr_mode == 0 || + phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { + /* Log the current active interrupt mode */ + phba->intr_mode = intr_mode; + 
lpfc_log_intr_mode(phba, intr_mode); + break; + } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0451 Configure interrupt mode (%d) " + "failed active interrupt test.\n", + intr_mode); + /* Unset the previous SLI-4 HBA setup */ + lpfc_sli4_unset_hba(phba); + /* Try next level of interrupt mode */ + cfg_mode = --intr_mode; + } + + /* Perform post initialization setup */ + lpfc_post_init_setup(phba); + + return 0; + +out_disable_intr: + lpfc_sli4_disable_intr(phba); +out_free_sysfs_attr: + lpfc_free_sysfs_attr(vport); +out_destroy_shost: + lpfc_destroy_shost(phba); +out_unset_driver_resource: + lpfc_unset_driver_resource_phase2(phba); +out_free_iocb_list: + lpfc_free_iocb_list(phba); +out_unset_driver_resource_s4: + lpfc_sli4_driver_resource_unset(phba); +out_unset_pci_mem_s4: + lpfc_sli4_pci_mem_unset(phba); +out_disable_pci_dev: + lpfc_disable_pci_dev(phba); +out_free_phba: + lpfc_hba_free(phba); + return error; +} + +/** + * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem + * @pdev: pointer to PCI device + * + * This routine is called from the kernel's PCI subsystem to device with + * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is + * removed from PCI bus, it performs all the necessary cleanup for the HBA + * device to be removed from the PCI subsystem properly. + **/ +static void __devexit +lpfc_pci_remove_one_s4(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_vport **vports; + struct lpfc_hba *phba = vport->phba; + int i; + + /* Mark the device unloading flag */ + spin_lock_irq(&phba->hbalock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(&phba->hbalock); + + /* Free the HBA sysfs attributes */ + lpfc_free_sysfs_attr(vport); + + /* Release all the vports against this physical port */ + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) + fc_vport_terminate(vports[i]->fc_vport); + lpfc_destroy_vport_work_array(phba, vports); + + /* Remove FC host and then SCSI host with the physical port */ + fc_remove_host(shost); + scsi_remove_host(shost); + + /* Perform cleanup on the physical port */ + lpfc_cleanup(vport); + + /* + * Bring down the SLI Layer. This step disables all interrupts, + * clears the rings, discards all mailbox commands, and resets + * the HBA FCoE function. + */ + lpfc_debugfs_terminate(vport); + lpfc_sli4_hba_unset(phba); + + spin_lock_irq(&phba->hbalock); + list_del_init(&vport->listentry); + spin_unlock_irq(&phba->hbalock); + + /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi + * buffers are released to their corresponding pools here. + */ + lpfc_scsi_free(phba); + lpfc_sli4_driver_resource_unset(phba); + + /* Unmap adapter Control and Doorbell registers */ + lpfc_sli4_pci_mem_unset(phba); + + /* Release PCI resources and disable device's PCI function */ + scsi_host_put(shost); + lpfc_disable_pci_dev(phba); + + /* Finally, free the driver's device data structure */ + lpfc_hba_free(phba); + + return; +} + +/** + * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt + * @pdev: pointer to PCI device + * @msg: power management message + * + * This routine is called from the kernel's PCI subsystem to support system + * Power Management (PM) to device with SLI-4 interface spec.
When PM invokes + * this method, it quiesces the device by stopping the driver's worker + * thread for the device, turning off device's interrupt and DMA, and bringing + * the device offline. Note that as the driver implements the minimum PM + * requirements to a power-aware driver's PM support for suspend/resume -- all + * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() + * method call will be treated as SUSPEND and the driver will fully + * reinitialize its device during resume() method call, the driver will set + * device to PCI_D3hot state in PCI config space instead of setting it + * according to the @msg provided by the PM. + * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int +lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0298 PCI device Power Management suspend.\n"); + + /* Bring down the device */ + lpfc_offline_prep(phba); + lpfc_offline(phba); + kthread_stop(phba->worker_thread); + + /* Disable interrupt from device */ + lpfc_sli4_disable_intr(phba); + + /* Save device state to PCI config space */ + pci_save_state(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +/** + * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt + * @pdev: pointer to PCI device + * + * This routine is called from the kernel's PCI subsystem to support system + * Power Management (PM) to device with SLI-4 interface spec. When PM invokes + * this method, it restores the device's PCI config space state and fully + * reinitializes the device and brings it online. Note that as the driver + * implements the minimum PM requirements to a power-aware driver's PM for + * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) + * to the suspend() method call will be treated as SUSPEND and the driver + * will fully reinitialize its device during resume() method call, the device + * will be set to PCI_D0 directly in PCI config space before restoring the + * state. + * + * Return code + * 0 - driver resumed the device + * Error otherwise + **/ +static int +lpfc_pci_resume_one_s4(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + uint32_t intr_mode; + int error; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0292 PCI device Power Management resume.\n"); + + /* Restore device state from PCI config space */ + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + if (pdev->is_busmaster) + pci_set_master(pdev); + + /* Startup the kernel thread for this host adapter.
*/ + phba->worker_thread = kthread_run(lpfc_do_work, phba, + "lpfc_worker_%d", phba->brd_no); + if (IS_ERR(phba->worker_thread)) { + error = PTR_ERR(phba->worker_thread); + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0293 PM resume failed to start worker " + "thread: error=x%x.\n", error); + return error; + } + + /* Configure and enable interrupt */ + intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0294 PM resume Failed to enable interrupt\n"); + return -EIO; + } else + phba->intr_mode = intr_mode; + + /* Restart HBA and bring it online */ + lpfc_sli_brdrestart(phba); lpfc_online(phba); + + /* Log the current active interrupt mode */ + lpfc_log_intr_mode(phba, phba->intr_mode); + + return 0; +} + +/** + * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device + * @pdev: pointer to PCI device. + * @state: the current PCI connection state. + * + * This routine is called from the PCI subsystem for error handling to device + * with SLI-4 interface spec. This function is called by the PCI subsystem + * after a PCI bus error affecting this device has been detected. When this + * function is invoked, it will need to stop all the I/Os and interrupt(s) + * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET + * for the PCI subsystem to perform proper recovery as desired. + * + * Return codes + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) +{ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch + * @pdev: pointer to PCI device. + * + * This routine is called from the PCI subsystem for error handling to device + * with SLI-4 interface spec. It is called after PCI bus has been reset to + * restart the PCI card from scratch, as if from a cold-boot. During the + * PCI subsystem error recovery, after the driver returns + * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error + * recovery and then call this routine before calling the .resume method to + * recover the device. This function will initialize the HBA device, enable + * the interrupt, but it will just put the HBA to offline state without + * passing any I/O traffic. + * + * Return codes + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_slot_reset_s4(struct pci_dev *pdev) +{ + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device + * @pdev: pointer to PCI device + * + * This routine is called from the PCI subsystem for error handling to device + * with SLI-4 interface spec. It is called when kernel error recovery tells + * the lpfc driver that it is ok to resume normal PCI operation after PCI bus + * error recovery. After this call, traffic can start to flow from this device + * again. + **/ +static void +lpfc_io_resume_s4(struct pci_dev *pdev) +{ + return; +} + +/** + * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier + * + * This routine is to be registered to the kernel's PCI subsystem.
When an + * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks + * at PCI device-specific information of the device and driver to see if the + * driver can support this kind of device. If the match is + * successful, the driver core invokes this routine. This routine dispatches + * the action to the proper SLI-3 or SLI-4 device probing routine, which will + * do all the initialization that it needs to do to handle the HBA device + * properly. + * + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device + **/ +static int __devinit +lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) +{ + int rc; + uint16_t dev_id; + + if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) + return -ENODEV; + + switch (dev_id) { + case PCI_DEVICE_ID_TIGERSHARK: + case PCI_DEVICE_ID_TIGERSHARK_S: + rc = lpfc_pci_probe_one_s4(pdev, pid); + break; + default: + rc = lpfc_pci_probe_one_s3(pdev, pid); + break; + } + return rc; +} + +/** + * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem + * @pdev: pointer to PCI device + * + * This routine is to be registered to the kernel's PCI subsystem. When an + * Emulex HBA is removed from PCI bus, the driver core invokes this routine. + * This routine dispatches the action to the proper SLI-3 or SLI-4 device + * remove routine, which will perform all the necessary cleanup for the + * device to be removed from the PCI subsystem properly. + **/ +static void __devexit +lpfc_pci_remove_one(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + lpfc_pci_remove_one_s3(pdev); + break; + case LPFC_PCI_DEV_OC: + lpfc_pci_remove_one_s4(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1424 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return; +} + +/** + * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management + * @pdev: pointer to PCI device + * @msg: power management message + * + * This routine is to be registered to the kernel's PCI subsystem to support + * system Power Management (PM). When PM invokes this method, it dispatches + * the action to the proper SLI-3 or SLI-4 device suspend routine, which will + * suspend the device. + * + * Return code + * 0 - driver suspended the device + * Error otherwise + **/ +static int +lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int rc = -ENODEV; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_pci_suspend_one_s3(pdev, msg); + break; + case LPFC_PCI_DEV_OC: + rc = lpfc_pci_suspend_one_s4(pdev, msg); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1425 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management + * @pdev: pointer to PCI device + * + * This routine is to be registered to the kernel's PCI subsystem to support + * system Power Management (PM). When PM invokes this method, it dispatches + * the action to the proper SLI-3 or SLI-4 device resume routine, which will + * resume the device.
+ * + * Return code + * 0 - driver resumed the device + * Error otherwise + **/ +static int +lpfc_pci_resume_one(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + int rc = -ENODEV; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_pci_resume_one_s3(pdev); + break; + case LPFC_PCI_DEV_OC: + rc = lpfc_pci_resume_one_s4(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1426 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_error_detected - lpfc method for handling PCI I/O error + * @pdev: pointer to PCI device. + * @state: the current PCI connection state. + * + * This routine is registered to the PCI subsystem for error handling. This + * function is called by the PCI subsystem after a PCI bus error affecting + * this device has been detected. When this routine is invoked, it dispatches + * the action to the proper SLI-3 or SLI-4 device error detected handling + * routine, which will perform the proper error detected operation. + * + * Return codes + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_io_error_detected_s3(pdev, state); + break; + case LPFC_PCI_DEV_OC: + rc = lpfc_io_error_detected_s4(pdev, state); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1427 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch + * @pdev: pointer to PCI device. + * + * This routine is registered to the PCI subsystem for error handling. This + * function is called after PCI bus has been reset to restart the PCI card + * from scratch, as if from a cold-boot. When this routine is invoked, it + * dispatches the action to the proper SLI-3 or SLI-4 device reset handling + * routine, which will perform the proper device reset. + * + * Return codes + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + **/ +static pci_ers_result_t +lpfc_io_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + rc = lpfc_io_slot_reset_s3(pdev); + break; + case LPFC_PCI_DEV_OC: + rc = lpfc_io_slot_reset_s4(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1428 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return rc; +} + +/** + * lpfc_io_resume - lpfc method for resuming PCI I/O operation + * @pdev: pointer to PCI device + * + * This routine is registered to the PCI subsystem for error handling. It + * is called when kernel error recovery tells the lpfc driver that it is + * OK to resume normal PCI operation after PCI bus error recovery.
When + * this routine is invoked, it dispatches the action to the proper SLI-3 + * or SLI-4 device io_resume routine, which will resume the device operation. + **/ +static void +lpfc_io_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + switch (phba->pci_dev_grp) { + case LPFC_PCI_DEV_LP: + lpfc_io_resume_s3(pdev); + break; + case LPFC_PCI_DEV_OC: + lpfc_io_resume_s4(pdev); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1429 Invalid PCI device group: 0x%x\n", + phba->pci_dev_grp); + break; + } + return; } static struct pci_device_id lpfc_id_table[] = { @@ -3469,6 +7939,10 @@ static struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, + PCI_ANY_ID, PCI_ANY_ID, }, + {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S, + PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; @@ -3486,7 +7960,7 @@ static struct pci_driver lpfc_driver = { .probe = lpfc_pci_probe_one, .remove = __devexit_p(lpfc_pci_remove_one), .suspend = lpfc_pci_suspend_one, - .resume = lpfc_pci_resume_one, + .resume = lpfc_pci_resume_one, .err_handler = &lpfc_err_handler, }; diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 1aa85709b012..954ba57970a3 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,33 +18,39 @@ * included with this package. 
* *******************************************************************/ -#define LOG_ELS 0x1 /* ELS events */ -#define LOG_DISCOVERY 0x2 /* Link discovery events */ -#define LOG_MBOX 0x4 /* Mailbox events */ -#define LOG_INIT 0x8 /* Initialization events */ -#define LOG_LINK_EVENT 0x10 /* Link events */ -#define LOG_IP 0x20 /* IP traffic history */ -#define LOG_FCP 0x40 /* FCP traffic history */ -#define LOG_NODE 0x80 /* Node table events */ -#define LOG_TEMP 0x100 /* Temperature sensor events */ -#define LOG_BG 0x200 /* BlockGuard events */ -#define LOG_MISC 0x400 /* Miscellaneous events */ -#define LOG_SLI 0x800 /* SLI events */ -#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ -#define LOG_LIBDFC 0x2000 /* Libdfc events */ -#define LOG_VPORT 0x4000 /* NPIV events */ -#define LOG_ALL_MSG 0xffff /* LOG all messages */ +#define LOG_ELS 0x00000001 /* ELS events */ +#define LOG_DISCOVERY 0x00000002 /* Link discovery events */ +#define LOG_MBOX 0x00000004 /* Mailbox events */ +#define LOG_INIT 0x00000008 /* Initialization events */ +#define LOG_LINK_EVENT 0x00000010 /* Link events */ +#define LOG_IP 0x00000020 /* IP traffic history */ +#define LOG_FCP 0x00000040 /* FCP traffic history */ +#define LOG_NODE 0x00000080 /* Node table events */ +#define LOG_TEMP 0x00000100 /* Temperature sensor events */ +#define LOG_BG 0x00000200 /* BlockGuard events */ +#define LOG_MISC 0x00000400 /* Miscellaneous events */ +#define LOG_SLI 0x00000800 /* SLI events */ +#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */ +#define LOG_LIBDFC 0x00002000 /* Libdfc events */ +#define LOG_VPORT 0x00004000 /* NPIV events */ +#define LOG_SECURITY 0x00008000 /* Security events */ +#define LOG_EVENT 0x00010000 /* CT, TEMP, DUMP logging */ +#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ - do { \ - { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ +do { \ + { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ - } while (0) +} while (0) #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ - do { \ - { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ +do { \ + { uint32_t log_verbose = (phba)->pport ? \ + (phba)->pport->cfg_log_verbose : \ + (phba)->cfg_log_verbose; \ + if (((mask) & log_verbose) || (level[1] <= '3')) \ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ - fmt, phba->brd_no, ##arg); } \ - } while (0) + fmt, phba->brd_no, ##arg); \ + } \ +} while (0) diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 134fc7fc2127..b9b451c09010 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -28,8 +28,10 @@ #include <scsi/scsi.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -39,6 +41,44 @@ #include "lpfc_compat.h" /** + * lpfc_dump_static_vport - Dump HBA's static vport information.
+ * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * @offset: offset for dumping vport info. + * + * The dump mailbox command provides a method for the device driver to obtain + * various types of information from the HBA device. + * + * This routine prepares the mailbox command for dumping list of static + * vports to be created. + **/ +void +lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, + uint16_t offset) +{ + MAILBOX_t *mb; + void *ctx; + + mb = &pmb->u.mb; + ctx = pmb->context2; + + /* Setup to dump vport info region */ + memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->un.varDmp.cv = 1; + mb->un.varDmp.type = DMP_NV_PARAMS; + mb->un.varDmp.entry_index = offset; + mb->un.varDmp.region_id = DMP_REGION_VPORT; + mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); + mb->un.varDmp.co = 0; + mb->un.varDmp.resp_offset = 0; + pmb->context2 = ctx; + mb->mbxOwner = OWN_HOST; + + return; +} + +/** * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. @@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) MAILBOX_t *mb; void *ctx; - mb = &pmb->mb; + mb = &pmb->u.mb; ctx = pmb->context2; /* Setup to dump VPD region */ @@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) MAILBOX_t *mb; void *ctx; - mb = &pmb->mb; + mb = &pmb->u.mb; /* Save context so that we can restore after memset */ ctx = pmb->context2; @@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_NV; mb->mbxOwner = OWN_HOST; @@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_ASYNCEVT_ENABLE; mb->un.varCfgAsyncEvent.ring = ring; @@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_HEARTBEAT; mb->mbxOwner = OWN_HOST; @@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) struct lpfc_sli *psli; psli = &phba->sli; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); INIT_LIST_HEAD(&mp->list); @@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varClearLA.eventTag = phba->fc_eventTag; @@ -275,7 +315,7 @@ void lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { struct lpfc_vport *vport = phba->pport; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); /* NEW_FEATURE @@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) int lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; uint32_t attentionConditions[2]; /* Sanity check */ @@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba, struct lpfc_sli *psli; MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); psli = &phba->sli; @@ -492,7 +532,7 @@ lpfc_read_sparam(struct 
lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) struct lpfc_sli *psli; psli = &phba->sli; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxOwner = OWN_HOST; @@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); - mb->un.varRdSparm.vpi = vpi; + mb->un.varRdSparm.vpi = vpi + phba->vpi_base; /* save address for completion */ pmb->context1 = mp; @@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregDID.did = did; + if (vpi != 0xffff) + vpi += phba->vpi_base; mb->un.varUnregDID.vpi = vpi; mb->mbxCommand = MBX_UNREG_D_ID; @@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_CONFIG; @@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_LNK_STAT; @@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) } /** - * lpfc_reg_login - Prepare a mailbox command for registering remote login + * lpfc_reg_rpi - Prepare a mailbox command for registering remote login * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @did: remote port identifier. @@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) * 1 - DMA memory allocation failed **/ int -lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, +lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; uint8_t *sparam; struct lpfc_dmabuf *mp; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRegLogin.rpi = 0; - mb->un.varRegLogin.vpi = vpi; + if (phba->sli_rev == LPFC_SLI_REV4) { + mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba); + if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) + return 1; + } + + mb->un.varRegLogin.vpi = vpi + phba->vpi_base; mb->un.varRegLogin.did = did; mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ @@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, { MAILBOX_t *mb; - mb = &pmb->mb; + mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregLogin.rpi = (uint16_t) rpi; mb->un.varUnregLogin.rsvd1 = 0; - mb->un.varUnregLogin.vpi = vpi; + mb->un.varUnregLogin.vpi = vpi + phba->vpi_base; mb->mbxCommand = MBX_UNREG_LOGIN; mb->mbxOwner = OWN_HOST; + return; } @@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, * This routine prepares the mailbox command for registering a virtual N_Port. 
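+ *
+ * Usage sketch (illustrative only, not part of this patch; assumes the
+ * caller owns @vport and allocates the mailbox from the driver mempool,
+ * with lpfc_sli_def_mbox_cmpl as a generic completion handler):
+ *
+ *   LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ *   if (pmb) {
+ *           lpfc_reg_vpi(vport, pmb);
+ *           pmb->vport = vport;
+ *           pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ *           if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) ==
+ *               MBX_NOT_FINISHED)
+ *                   mempool_free(pmb, phba->mbox_mem_pool);
+ *   }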
**/ void -lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, - LPFC_MBOXQ_t *pmb) +lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - mb->un.varRegVpi.vpi = vpi; - mb->un.varRegVpi.sid = sid; + mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; + mb->un.varRegVpi.sid = vport->fc_myDID; + mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; mb->mbxCommand = MBX_REG_VPI; mb->mbxOwner = OWN_HOST; @@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, void lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - mb->un.varUnregVpi.vpi = vpi; + mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; mb->mbxCommand = MBX_UNREG_VPI; mb->mbxOwner = OWN_HOST; @@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) void lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRdRev.cv = 1; mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ @@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) { int i; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); @@ -1020,7 +1069,7 @@ void lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) { int i; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; struct lpfc_sli *psli; struct lpfc_sli_ring *pring; @@ -1075,7 +1124,7 @@ void lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; dma_addr_t pdma_addr; uint32_t bar_low, bar_high; size_t offset; @@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* If HBA supports SLI=3 ask for it */ - if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { + if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { if (phba->cfg_enable_bg) mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ + mb->un.varCfgPort.cdss = 1; /* Configure Security */ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); if (phba->max_vpi && phba->cfg_enable_npiv && phba->vpd.sli3Feat.cmv) { - mb->un.varCfgPort.max_vpi = phba->max_vpi; + mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI; mb->un.varCfgPort.cmv = 1; } else mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; } else - phba->sli_rev = 2; + phba->sli_rev = LPFC_SLI_REV2; mb->un.varCfgPort.sli_mode = phba->sli_rev; /* Now setup pcb */ @@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) void lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_KILL_BOARD; @@ -1305,29 +1355,98 @@ lpfc_mbox_get(struct lpfc_hba * phba) } /** + * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list + * @phba: pointer to lpfc hba data structure. + * @mbq: pointer to the driver internal queue element for mailbox command. 
+ * + * This routine puts the completed mailbox command into the mailbox command + * complete list. This is the unlocked version of the routine. The mailbox + * complete list is used by the driver worker thread to process mailbox + * complete callback functions outside the driver interrupt handler. + **/ +void +__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) +{ + list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); +} + +/** * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * This routine put the completed mailbox command into the mailbox command - * complete list. This routine is called from driver interrupt handler - * context.The mailbox complete list is used by the driver worker thread - * to process mailbox complete callback functions outside the driver interrupt - * handler. + * complete list. This is the locked version of the routine. The mailbox + * complete list is used by the driver worker thread to process mailbox + * complete callback functions outside the driver interrupt handler. **/ void -lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) +lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) { unsigned long iflag; /* This function expects to be called from interrupt context */ spin_lock_irqsave(&phba->hbalock, iflag); - list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); + __lpfc_mbox_cmpl_put(phba, mbq); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } /** + * lpfc_mbox_cmd_check - Check the validity of a mailbox command + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to the driver internal queue element for mailbox command. + * + * This routine is to check whether a mailbox command is valid to be issued. + * This check is performed by the mailbox issue API when a client + * is to issue a mailbox command to the mailbox transport. + * + * Return 0 - pass the check, -ENODEV - fail the check + **/ +int +lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + /* Mailbox commands that have a completion handler must also have a + * vport specified. + */ + if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl && + mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) { + if (!mboxq->vport) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, + "1814 Mbox x%x failed, no vport\n", + mboxq->u.mb.mbxCommand); + dump_stack(); + return -ENODEV; + } + } + return 0; +} + +/** + * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command + * @phba: pointer to lpfc hba data structure. + * + * This routine is to check whether the HBA device is ready for posting a + * mailbox command. It is used by the mailbox transport API at the time it is + * to post a mailbox command to the device. + * + * Return 0 - pass the check, -ENODEV - fail the check + **/ +int +lpfc_mbox_dev_check(struct lpfc_hba *phba) +{ + /* If the PCI channel is in offline state, do not issue mbox */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return -ENODEV; + + /* If the HBA is in error state, do not issue mbox */ + if (phba->link_state == LPFC_HBA_ERROR) + return -ENODEV; + + return 0; +} + +/** * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value * @phba: pointer to lpfc hba data structure. * @cmd: mailbox command code.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) case MBX_WRITE_WWN: /* 0x98 */ case MBX_LOAD_EXP_ROM: /* 0x9C */ return LPFC_MBOX_TMO_FLASH_CMD; + case MBX_SLI4_CONFIG: /* 0x9b */ + return LPFC_MBOX_SLI4_CONFIG_TMO; } return LPFC_MBOX_TMO; } + +/** + * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command + * @mbox: pointer to lpfc mbox command. + * @sgentry: sge entry index. + * @phyaddr: physical address for the sge + * @length: Length of the sge. + * + * This routine sets up an entry in the non-embedded mailbox command at the sge + * index location. + **/ +void +lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry, + dma_addr_t phyaddr, uint32_t length) +{ + struct lpfc_mbx_nembed_cmd *nembed_sge; + + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &mbox->u.mqe.un.nembed_cmd; + nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr); + nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr); + nembed_sge->sge[sgentry].length = length; +} + +/** + * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command + * @mbox: pointer to lpfc mbox command. + * @sgentry: sge entry index. + * + * This routine gets an entry from the non-embedded mailbox command at the sge + * index location. + **/ +void +lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry, + struct lpfc_mbx_sge *sge) +{ + struct lpfc_mbx_nembed_cmd *nembed_sge; + + nembed_sge = (struct lpfc_mbx_nembed_cmd *) + &mbox->u.mqe.un.nembed_cmd; + sge->pa_lo = nembed_sge->sge[sgentry].pa_lo; + sge->pa_hi = nembed_sge->sge[sgentry].pa_hi; + sge->length = nembed_sge->sge[sgentry].length; +} + +/** + * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command. + * + * This routine frees SLI4 specific mailbox command for sending IOCTL command. + **/ +void +lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_mbx_sli4_config *sli4_cfg; + struct lpfc_mbx_sge sge; + dma_addr_t phyaddr; + uint32_t sgecount, sgentry; + + sli4_cfg = &mbox->u.mqe.un.sli4_config; + + /* For embedded mbox command, just free the mbox command */ + if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + + /* For non-embedded mbox command, we need to free the pages first */ + sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr); + /* There is nothing we can do if there is no sge address array */ + if (unlikely(!mbox->sge_array)) { + mempool_free(mbox, phba->mbox_mem_pool); + return; + } + /* Each non-embedded DMA memory was allocated in the length of a page */ + for (sgentry = 0; sgentry < sgecount; sgentry++) { + lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge); + phyaddr = getPaddr(sge.pa_hi, sge.pa_lo); + dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE, + mbox->sge_array->addr[sgentry], phyaddr); + } + /* Free the sge address array memory */ + kfree(mbox->sge_array); + /* Finally, free the mailbox command itself */ + mempool_free(mbox, phba->mbox_mem_pool); +} + +/** + * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command. + * @subsystem: The sli4 config sub mailbox subsystem. + * @opcode: The sli4 config sub mailbox command opcode. + * @length: Length of the sli4 config mailbox command. + * + * This routine sets up the header fields of SLI4 specific mailbox command + * for sending IOCTL command. 
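+ *
+ * Usage sketch (illustrative only, not part of this patch): building and
+ * polling an embedded config command; the NOP subsystem/opcode/payload
+ * names assume the definitions introduced by lpfc_hw4.h and lpfc_sli4.h:
+ *
+ *   reqlen = sizeof(struct lpfc_mbx_nop);
+ *   lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ *                    LPFC_MBOX_OPCODE_NOP, reqlen, LPFC_SLI4_MBX_EMBED);
+ *   rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ *
+ * A non-embedded command allocates DMA pages for the payload and must be
+ * released with lpfc_sli4_mbox_cmd_free() rather than a bare mempool_free.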
+ * + * Return: the actual length of the mbox command allocated (mostly useful + * for non-embedded mailbox commands). + **/ +int +lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, + uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb) +{ + struct lpfc_mbx_sli4_config *sli4_config; + union lpfc_sli4_cfg_shdr *cfg_shdr = NULL; + uint32_t alloc_len; + uint32_t resid_len; + uint32_t pagen, pcount; + void *viraddr; + dma_addr_t phyaddr; + + /* Set up SLI4 mailbox command header fields */ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG); + + /* Set up SLI4 ioctl command header fields */ + sli4_config = &mbox->u.mqe.un.sli4_config; + + /* Setup for the embedded mbox command */ + if (emb) { + /* Set up main header fields */ + bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1); + sli4_config->header.cfg_mhdr.payload_length = + LPFC_MBX_CMD_HDR_LENGTH + length; + /* Set up sub-header fields following main header */ + bf_set(lpfc_mbox_hdr_opcode, + &sli4_config->header.cfg_shdr.request, opcode); + bf_set(lpfc_mbox_hdr_subsystem, + &sli4_config->header.cfg_shdr.request, subsystem); + sli4_config->header.cfg_shdr.request.request_length = length; + return length; + } + + /* Setup for the non-embedded mbox command */ + pcount = (PAGE_ALIGN(length))/PAGE_SIZE; + pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? + LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; + /* Allocate record for keeping SGE virtual addresses */ + mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), + GFP_KERNEL); + if (!mbox->sge_array) + return 0; + + for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { + /* The DMA memory is always allocated in the length of a + * page even though the last SGE might not fill up to a + * page; PAGE_SIZE is then assumed as the size when the + * DMA memory is later freed. + */ + viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE, + &phyaddr, GFP_KERNEL); + /* If the allocation fails, proceed with whatever we have */ + if (!viraddr) + break; + mbox->sge_array->addr[pagen] = viraddr; + /* Keep the first page for later sub-header construction */ + if (pagen == 0) + cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr; + resid_len = length - alloc_len; + if (resid_len > PAGE_SIZE) { + lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, + PAGE_SIZE); + alloc_len += PAGE_SIZE; + } else { + lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, + resid_len); + alloc_len = length; + } + } + + /* Set up main header fields in mailbox command */ + sli4_config->header.cfg_mhdr.payload_length = alloc_len; + bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen); + + /* Set up sub-header fields into the first page */ + if (pagen > 0) { + bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode); + bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem); + cfg_shdr->request.request_length = + alloc_len - sizeof(union lpfc_sli4_cfg_shdr); + } + /* The sub-header is in DMA memory, which needs endian conversion */ + lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, + sizeof(union lpfc_sli4_cfg_shdr)); + + return alloc_len; +} + +/** + * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command. + * + * This routine gets the opcode from a SLI4 specific mailbox command for + * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG + * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be + * returned.
+ **/ +uint8_t +lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_mbx_sli4_config *sli4_cfg; + union lpfc_sli4_cfg_shdr *cfg_shdr; + + if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) + return 0; + sli4_cfg = &mbox->u.mqe.un.sli4_config; + + /* For embedded mbox command, get opcode from embedded sub-header */ + if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { + cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; + return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); + } + + /* For non-embedded mbox command, get opcode from first dma page */ + if (unlikely(!mbox->sge_array)) + return 0; + cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; + return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); +} + +/** + * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to lpfc mbox command. + * + * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES + * mailbox command. + **/ +void +lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) +{ + /* Set up SLI4 mailbox command header fields */ + memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); + bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS); + + /* Set up host requested features. */ + bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); + + /* Virtual fabrics and FIPs are not supported yet. */ + bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); + + /* Enable DIF (block guard) only if configured to do so. */ + if (phba->cfg_enable_bg) + bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); + + /* Enable NPIV only if configured to do so. */ + if (phba->max_vpi && phba->cfg_enable_npiv) + bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); + + return; +} + +/** + * lpfc_init_vfi - Initialize the INIT_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vport: Vport associated with the VF. + * + * This routine initializes @mbox to all zeros and then fills in the mailbox + * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI + * in the context of an FCF. The driver issues this command to setup a VFI + * before issuing a FLOGI to login to the VSAN. The driver should also issue a + * REG_VFI after a successful VSAN login. + **/ +void +lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) +{ + struct lpfc_mbx_init_vfi *init_vfi; + + memset(mbox, 0, sizeof(*mbox)); + init_vfi = &mbox->u.mqe.un.init_vfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); + bf_set(lpfc_init_vfi_vr, init_vfi, 1); + bf_set(lpfc_init_vfi_vt, init_vfi, 1); + bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); + bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); +} + +/** + * lpfc_reg_vfi - Initialize the REG_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vport: vport associated with the VF. + * @phys: BDE DMA bus address used to send the service parameters to the HBA. + * + * This routine initializes @mbox to all zeros and then fills in the mailbox + * fields from @vport, and uses @phys as the DMAable buffer address to send + * the vport's fc service parameters to the HBA for this VFI. REG_VFI + * configures virtual + * fabrics identified by VFI in the context of an FCF.
+ **/ +void +lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) +{ + struct lpfc_mbx_reg_vfi *reg_vfi; + + memset(mbox, 0, sizeof(*mbox)); + reg_vfi = &mbox->u.mqe.un.reg_vfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); + bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); + bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); + bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); + bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); + reg_vfi->bde.addrHigh = putPaddrHigh(phys); + reg_vfi->bde.addrLow = putPaddrLow(phys); + reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); + reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); +} + +/** + * lpfc_init_vpi - Initialize the INIT_VPI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vpi: VPI to be initialized. + * + * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the + * command to activate a virtual N_Port. The HBA assigns a MAC address to use + * with the virtual N_Port. The SLI Host issues this command before issuing an + * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a + * successful virtual N_Port login. + **/ +void +lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); + bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi); +} + +/** + * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @vfi: VFI to be unregistered. + * + * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric + * (logical N_Port) into the inactive state. The SLI Host must have logged out + * and unregistered all remote N_Ports to abort any activity on the virtual + * fabric. The SLI Port posts the mailbox response after marking the virtual + * fabric inactive. + **/ +void +lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); + bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi); +} + +/** + * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters. + * @phba: pointer to the hba structure. + * @mbox: pointer to lpfc mbox command to initialize. + * + * This function creates a SLI4 dump mailbox command to dump FCoE + * parameters stored in region 23.
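+ *
+ * Usage sketch (illustrative only, not part of this patch): issue the
+ * prepared command by polling, then read the parameters out of the DMA
+ * buffer saved in context1:
+ *
+ *   if (!lpfc_dump_fcoe_param(phba, mboxq) &&
+ *       lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL) == MBX_SUCCESS) {
+ *           struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)
+ *                                           mboxq->context1;
+ *           ... parse the region 23 FCoE parameters at mp->virt ...
+ *           lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ *           kfree(mp);
+ *   }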
+ **/ +int +lpfc_dump_fcoe_param(struct lpfc_hba *phba, + struct lpfcMboxq *mbox) +{ + struct lpfc_dmabuf *mp = NULL; + MAILBOX_t *mb; + + memset(mbox, 0, sizeof(*mbox)); + mb = &mbox->u.mb; + + mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (mp) + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + + if (!mp || !mp->virt) { + kfree(mp); + /* dump_fcoe_param failed to allocate memory */ + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "2569 lpfc_dump_fcoe_param: memory" + " allocation failed\n"); + return 1; + } + + memset(mp->virt, 0, LPFC_BPL_SIZE); + INIT_LIST_HEAD(&mp->list); + + /* save address for completion */ + mbox->context1 = (uint8_t *) mp; + + mb->mbxCommand = MBX_DUMP_MEMORY; + mb->un.varDmp.type = DMP_NV_PARAMS; + mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM; + mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE; + mb->un.varWords[3] = putPaddrLow(mp->phys); + mb->un.varWords[4] = putPaddrHigh(mp->phys); + return 0; +} + +/** + * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command + * @phba: pointer to the hba structure containing the FCF index and RQ ID. + * @mbox: pointer to lpfc mbox command to initialize. + * + * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The + * SLI Host uses the command to activate an FCF after it has acquired FCF + * information via a READ_FCF mailbox command. This mailbox command is also used + * to indicate where received unsolicited frames from this FCF will be sent. By + * default this routine will set up the FCF to forward all unsolicited frames + * to the RQ ID passed in the @phba. This can be overridden by the caller for + * more complicated setups. + **/ +void +lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +{ + struct lpfc_mbx_reg_fcfi *reg_fcfi; + + memset(mbox, 0, sizeof(*mbox)); + reg_fcfi = &mbox->u.mqe.un.reg_fcfi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); + bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); + bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); + bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx); + /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ + bf_set(lpfc_reg_fcfi_mam, reg_fcfi, + (~phba->fcf.addr_mode) & 0x3); + if (phba->fcf.fcf_flag & FCF_VALID_VLAN) { + bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); + bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id); + } +} + +/** + * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @fcfi: FCFI to be unregistered. + * + * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). + * The SLI Host uses the command to inactivate an FCFI. + **/ +void +lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi) +{ + memset(mbox, 0, sizeof(*mbox)); + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI); + bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi); +} + +/** + * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command + * @mbox: pointer to lpfc mbox command to initialize. + * @ndlp: The nodelist structure that describes the RPI to resume. + * + * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a + * link event.
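+ *
+ * Usage sketch (illustrative only, not part of this patch; @ndlp is a
+ * logged-in node whose RPI is being resumed after the link event):
+ *
+ *   lpfc_resume_rpi(mboxq, ndlp);
+ *   if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
+ *           mempool_free(mboxq, phba->mbox_mem_pool);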
+ **/ +void +lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) +{ + struct lpfc_mbx_resume_rpi *resume_rpi; + + memset(mbox, 0, sizeof(*mbox)); + resume_rpi = &mbox->u.mqe.un.resume_rpi; + bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); + bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi); + bf_set(lpfc_resume_rpi_vpi, resume_rpi, + ndlp->vport->vpi + ndlp->vport->phba->vpi_base); + bf_set(lpfc_resume_rpi_vfi, resume_rpi, + ndlp->vport->vfi + ndlp->vport->phba->vfi_base); +} diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 35a976733398..e198c917c13e 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -28,8 +28,10 @@ #include <scsi/scsi.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -45,7 +47,7 @@ * @phba: HBA to allocate pools for * * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, - * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools + * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * * Notes: Not interrupt-safe. Must be called with no locks held. If any @@ -56,19 +58,30 @@ * -ENOMEM on failure (if any memory allocations fail) **/ int -lpfc_mem_alloc(struct lpfc_hba * phba) +lpfc_mem_alloc(struct lpfc_hba *phba, int align) { struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int longs; int i; - phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", - phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->lpfc_scsi_dma_buf_pool = + pci_pool_create("lpfc_scsi_dma_buf_pool", + phba->pcidev, + phba->cfg_sg_dma_buf_size, + phba->cfg_sg_dma_buf_size, + 0); + else + phba->lpfc_scsi_dma_buf_pool = + pci_pool_create("lpfc_scsi_dma_buf_pool", + phba->pcidev, phba->cfg_sg_dma_buf_size, + align, 0); if (!phba->lpfc_scsi_dma_buf_pool) goto fail; phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, - LPFC_BPL_SIZE, 8,0); + LPFC_BPL_SIZE, + align, 0); if (!phba->lpfc_mbuf_pool) goto fail_free_dma_buf_pool; @@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba) sizeof(struct lpfc_nodelist)); if (!phba->nlp_mem_pool) goto fail_free_mbox_pool; - - phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, - LPFC_BPL_SIZE, 8, 0); - if (!phba->lpfc_hbq_pool) + phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", + phba->pcidev, + LPFC_HDR_BUF_SIZE, align, 0); + if (!phba->lpfc_hrb_pool) goto fail_free_nlp_mem_pool; + phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", + phba->pcidev, + LPFC_DATA_BUF_SIZE, align, 0); + if (!phba->lpfc_drb_pool) + goto fail_free_hbq_pool; /* vpi zero is reserved for the physical port so add 1 to max */ longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (!phba->vpi_bmask) - goto fail_free_hbq_pool; + goto fail_free_dbq_pool; return 
0; + fail_free_dbq_pool: + pci_pool_destroy(phba->lpfc_drb_pool); + phba->lpfc_drb_pool = NULL; fail_free_hbq_pool: - lpfc_sli_hbqbuf_free_all(phba); - pci_pool_destroy(phba->lpfc_hbq_pool); + pci_pool_destroy(phba->lpfc_hrb_pool); + phba->lpfc_hrb_pool = NULL; fail_free_nlp_mem_pool: mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; @@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba) } /** - * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc + * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc * @phba: HBA to free memory for * - * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, - * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and - * lpfc_nodelist. Also frees the VPI bitmask + * Description: Frees the memory allocated by the lpfc_mem_alloc routine. This + * routine is the counterpart of lpfc_mem_alloc. * * Returns: None **/ void -lpfc_mem_free(struct lpfc_hba * phba) +lpfc_mem_free(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; - LPFC_MBOXQ_t *mbox, *next_mbox; - struct lpfc_dmabuf *mp; int i; + struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + /* Free VPI bitmask memory */ kfree(phba->vpi_bmask); + + /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); + pci_pool_destroy(phba->lpfc_drb_pool); + phba->lpfc_drb_pool = NULL; + pci_pool_destroy(phba->lpfc_hrb_pool); + phba->lpfc_hrb_pool = NULL; + + /* Free NLP memory pool */ + mempool_destroy(phba->nlp_mem_pool); + phba->nlp_mem_pool = NULL; + + /* Free mbox memory pool */ + mempool_destroy(phba->mbox_mem_pool); + phba->mbox_mem_pool = NULL; + + /* Free MBUF memory pool */ + for (i = 0; i < pool->current_count; i++) + pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, + pool->elements[i].phys); + kfree(pool->elements); + + pci_pool_destroy(phba->lpfc_mbuf_pool); + phba->lpfc_mbuf_pool = NULL; + /* Free DMA buffer memory pool */ + pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); + phba->lpfc_scsi_dma_buf_pool = NULL; + + return; +} + +/** + * lpfc_mem_free_all - Frees all PCI and driver memory + * @phba: HBA to free memory for + * + * Description: Frees memory from PCI and driver memory pools, including + * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, and lpfc_hrb_pool. Frees + * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees + * the VPI bitmask.
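+ *
+ * Teardown-order sketch (illustrative only, not part of this patch): on
+ * unload the driver drains mailbox state before destroying the pools:
+ *
+ *   lpfc_mem_free_all(phba);
+ *
+ * which returns any queued or active mailbox buffers to the mbox mempool
+ * and then calls lpfc_mem_free() to destroy the PCI pools and mempools.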
+ * + * Returns: None + **/ +void +lpfc_mem_free_all(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mbox, *next_mbox; + struct lpfc_dmabuf *mp; + + /* Free memory used in mailbox queue back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } + /* Free memory used in mailbox cmpl list back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } - + /* Free the active mailbox command back to the mailbox memory pool */ + spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(&phba->hbalock); if (psli->mbox_active) { mbox = psli->mbox_active; mp = (struct lpfc_dmabuf *) (mbox->context1); @@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba) psli->mbox_active = NULL; } - for (i = 0; i < pool->current_count; i++) - pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, - pool->elements[i].phys); - kfree(pool->elements); - - pci_pool_destroy(phba->lpfc_hbq_pool); - mempool_destroy(phba->nlp_mem_pool); - mempool_destroy(phba->mbox_mem_pool); - - pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); - pci_pool_destroy(phba->lpfc_mbuf_pool); - - phba->lpfc_hbq_pool = NULL; - phba->nlp_mem_pool = NULL; - phba->mbox_mem_pool = NULL; - phba->lpfc_scsi_dma_buf_pool = NULL; - phba->lpfc_mbuf_pool = NULL; + /* Free and destroy all the allocated memory pools */ + lpfc_mem_free(phba); /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; + + return; } /** @@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) * lpfc_els_hbq_alloc - Allocate an HBQ buffer * @phba: HBA to allocate HBQ buffer for * - * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI + * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI * pool along a non-DMA-mapped container for it. * * Notes: Not interrupt-safe. Must be called with no locks held. @@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) if (!hbqbp) return NULL; - hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, + hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, &hbqbp->dbuf.phys); if (!hbqbp->dbuf.virt) { kfree(hbqbp); @@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) } /** - * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc + * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc * @phba: HBA buffer was allocated for * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc * @@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) void lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) { - pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); + pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); kfree(hbqbp); return; } /** + * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer + * @phba: HBA to allocate a receive buffer for + * + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI + * pool along with a non-DMA-mapped container for it. + * + * Notes: Not interrupt-safe.
Must be called with no locks held. + * + * Returns: + * pointer to HBQ on success + * NULL on failure + **/ +struct hbq_dmabuf * +lpfc_sli4_rb_alloc(struct lpfc_hba *phba) +{ + struct hbq_dmabuf *dma_buf; + + dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); + if (!dma_buf) + return NULL; + + dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + &dma_buf->hbuf.phys); + if (!dma_buf->hbuf.virt) { + kfree(dma_buf); + return NULL; + } + dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, + &dma_buf->dbuf.phys); + if (!dma_buf->dbuf.virt) { + pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, + dma_buf->hbuf.phys); + kfree(dma_buf); + return NULL; + } + dma_buf->size = LPFC_BPL_SIZE; + return dma_buf; +} + +/** + * lpfc_sli4_rb_free - Frees a receive buffer + * @phba: HBA buffer was allocated for + * @dmab: DMA Buffer container returned by lpfc_sli4_rb_alloc + * + * Description: Frees both the container and the DMA-mapped buffers returned by + * lpfc_sli4_rb_alloc. + * + * Notes: Can be called with or without locks held. + * + * Returns: None + **/ +void +lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) +{ + pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); + pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); + kfree(dmab); + return; +} + +/** * lpfc_in_buf_free - Free a DMA buffer * @phba: HBA buffer is associated with * @mp: Buffer to free diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 08cdc77af41c..09f659f77bb3 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex.
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -28,8 +28,10 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (!mbox) goto out; - rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, + rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, (uint8_t *) sp, mbox, 0); if (rc) { mempool_free(mbox, phba->mbox_mem_pool); @@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + if ((ndlp->nlp_type & NLP_FABRIC) && + vport->port_type == LPFC_NPIV_PORT) { + lpfc_linkdown_port(vport); + mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(shost->host_lock); - if ((!(ndlp->nlp_type & NLP_FABRIC) && - ((ndlp->nlp_type & NLP_FCP_TARGET) || - !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || - (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { + ndlp->nlp_last_elscmd = ELS_CMD_FDISC; + } else if ((!(ndlp->nlp_type & NLP_FABRIC) && + ((ndlp->nlp_type & NLP_FCP_TARGET) || + !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || + (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); spin_lock_irq(shost->host_lock); @@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - if (!ndlp->nlp_rpi) { + if (!(ndlp->nlp_flag & NLP_RPI_VALID)) { ndlp->nlp_flag &= ~NLP_NPR_ADISC; return 0; } @@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_unreg_rpi(vport, ndlp); - if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, + if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID, (uint8_t *) sp, mbox, 0) == 0) { switch (ndlp->nlp_DID) { case NameServer_DID: @@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; ADISC *ap; + int rc; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; @@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, return ndlp->nlp_state; } + if (phba->sli_rev == LPFC_SLI_REV4) { + rc = lpfc_sli4_resume_rpi(ndlp); + if (rc) { + /* Stay in state and retry. 
*/ + ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; + return ndlp->nlp_state; + } + } + if (ndlp->nlp_type & NLP_FCP_TARGET) { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); @@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } + return ndlp->nlp_state; } @@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { - if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { lpfc_nlp_put(ndlp); mb->context2 = NULL; @@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { - if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { @@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; uint32_t did = mb->un.varWords[1]; if (mb->mbxStatus) { @@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } ndlp->nlp_rpi = mb->un.varWords[0]; + ndlp->nlp_flag |= NLP_RPI_VALID; /* Only if we are not a fabric nport do we issue PRLI */ if (!(ndlp->nlp_type & NLP_FABRIC)) { @@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, void *arg, uint32_t evt) { LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; - MAILBOX_t *mb = &pmb->mb; + MAILBOX_t *mb = &pmb->u.mb; - if (!mb->mbxStatus) + if (!mb->mbxStatus) { ndlp->nlp_rpi = mb->un.varWords[0]; - else { + ndlp->nlp_flag |= NLP_RPI_VALID; + } else { if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 8032c5adb6a9..e9fa6762044a 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -31,8 +31,10 @@ #include <scsi/scsi_transport_fc.h> #include "lpfc_version.h" +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -57,6 +59,8 @@ static char *dif_op_str[] = { "SCSI_PROT_READ_CONVERT", "SCSI_PROT_WRITE_CONVERT" }; +static void +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void lpfc_debug_save_data(struct scsi_cmnd *cmnd) @@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { new_queue_depth = @@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { if (vports[i]->cfg_lun_queue_depth <= @@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { rport = starget_to_rport(scsi_target(sdev)); @@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) } /** - * lpfc_new_scsi_buf - Scsi buffer allocator + * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec * @vport: The virtual port for which this call being executed. + * @num_to_alloc: The requested number of buffers to allocate. * - * This routine allocates a scsi buffer, which contains all the necessary - * information needed to initiate a SCSI I/O. The non-DMAable buffer region - * contains information to build the IOCB. The DMAable region contains - * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to - * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL - * and the BPL BDE is setup in the IOCB. + * This routine allocates a scsi buffer for device with SLI-3 interface spec; + * the scsi buffer contains all the necessary information needed to initiate + * a SCSI I/O. The non-DMAable buffer region contains information to build + * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, + * and the initial BPL. In addition to allocating memory, the FCP CMND and + * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. * * Return codes: + * int - number of scsi buffers that were allocated. + * 0 = failure, less than num_to_alloc is a partial failure. - * NULL - Error - * Pointer to lpfc_scsi_buf data structure - Success
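The layout described above — one pci_pool allocation holding the FCP CMND, FCP RSP and the BPL back to back — reduces to pointer and bus-address offset arithmetic over a single coherent buffer. A hedged sketch of the carving (variable names illustrative; the struct sizes are whatever the real fcp_cmnd/fcp_rsp definitions give):

	void *virt;          /* from pci_pool_alloc()          */
	dma_addr_t phys;     /* bus address of the same memory */

	struct fcp_cmnd *cmnd = virt;
	struct fcp_rsp *rsp   = virt + sizeof(struct fcp_cmnd);
	struct ulp_bde64 *bpl = virt + sizeof(struct fcp_cmnd) +
				       sizeof(struct fcp_rsp);

	dma_addr_t cmnd_phys = phys;
	dma_addr_t rsp_phys  = phys + sizeof(struct fcp_cmnd);
	dma_addr_t bpl_phys  = phys + sizeof(struct fcp_cmnd) +
				      sizeof(struct fcp_rsp);

Because virtual and bus offsets track each other inside one coherent allocation, each region's bus address can be handed to the hardware independently, which is exactly what the BDE setup below does.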
**/ -static struct lpfc_scsi_buf * -lpfc_new_scsi_buf(struct lpfc_vport *vport) +static int +lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) { struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *psb; @@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; + int bcnt; - psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); - if (!psb) - return NULL; + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); + if (!psb) + break; + + /* + * Get memory from the pci pool to map the virt space to pci + * bus space for an I/O. The DMA buffer includes space for the + * struct fcp_cmnd, struct fcp_rsp and the number of bde's + * necessary to support the sg_tablesize. + */ + psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, + GFP_KERNEL, &psb->dma_handle); + if (!psb->data) { + kfree(psb); + break; + } + + /* Initialize virtual ptrs to dma_buf region. */ + memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + + /* Allocate iotag for psb->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; + + psb->fcp_cmnd = psb->data; + psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); + psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); + + /* Initialize local short-hand pointers. */ + bpl = psb->fcp_bpl; + pdma_phys_fcp_cmd = psb->dma_handle; + pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); + pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); + + /* + * The first two bdes are the FCP_CMD and FCP_RSP. The balance + * are sg list bdes. Initialize the first two and leave the + * rest for queuecommand. + */ + bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); + bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); + bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); + bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); + + /* Setup the physical region for the FCP RSP */ + bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); + bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); + bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); + bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); + + /* + * Since the IOCB for the FCP I/O is built into this + * lpfc_scsi_buf, initialize it with all known data now. 
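The putPaddrHigh()/putPaddrLow() helpers used throughout this BDE setup just split a 64-bit bus address into the two 32-bit words the hardware expects; roughly (a sketch of the intent, not the driver's actual macros):

static inline uint32_t put_paddr_high(dma_addr_t addr)
{
	return (uint32_t)(((uint64_t)addr) >> 32);	/* upper 32 bits */
}

static inline uint32_t put_paddr_low(dma_addr_t addr)
{
	return (uint32_t)((uint64_t)addr & 0xffffffff);	/* lower 32 bits */
}

On platforms with a 32-bit dma_addr_t the high word simply comes out zero, so the same BDE formatting works for both address widths.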
+ */ + iocb = &psb->cur_iocbq.iocb; + iocb->un.fcpi64.bdl.ulpIoTag32 = 0; + if ((phba->sli_rev == 3) && + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { + /* fill in immediate fcp command BDE */ + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; + iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); + iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, + unsli3.fcp_ext.icd); + iocb->un.fcpi64.bdl.addrHigh = 0; + iocb->ulpBdeCount = 0; + iocb->ulpLe = 0; + /* fill in response BDE */ + iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = + BUFF_TYPE_BDE_64; + iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = + sizeof(struct fcp_rsp); + iocb->unsli3.fcp_ext.rbde.addrLow = + putPaddrLow(pdma_phys_fcp_rsp); + iocb->unsli3.fcp_ext.rbde.addrHigh = + putPaddrHigh(pdma_phys_fcp_rsp); + } else { + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + iocb->un.fcpi64.bdl.bdeSize = + (2 * sizeof(struct ulp_bde64)); + iocb->un.fcpi64.bdl.addrLow = + putPaddrLow(pdma_phys_bpl); + iocb->un.fcpi64.bdl.addrHigh = + putPaddrHigh(pdma_phys_bpl); + iocb->ulpBdeCount = 1; + iocb->ulpLe = 1; + } + iocb->ulpClass = CLASS3; + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); - /* - * Get memory from the pci pool to map the virt space to pci bus space - * for an I/O. The DMA buffer includes space for the struct fcp_cmnd, - * struct fcp_rsp and the number of bde's necessary to support the - * sg_tablesize. - */ - psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, - &psb->dma_handle); - if (!psb->data) { - kfree(psb); - return NULL; } - /* Initialize virtual ptrs to dma_buf region. */ - memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + return bcnt; +} - /* Allocate iotag for psb->cur_iocbq. */ - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); - if (iotag == 0) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); - kfree (psb); - return NULL; +/** + * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the fcp xri abort wcqe structure. + * + * This routine is invoked by the worker thread to process a SLI4 fast-path + * FCP aborted xri. + **/ +void +lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri) +{ + uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); + struct lpfc_scsi_buf *psb, *next_psb; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); + list_for_each_entry_safe(psb, next_psb, + &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { + if (psb->cur_iocbq.sli4_xritag == xri) { + list_del(&psb->list); + psb->status = IOSTAT_SUCCESS; + spin_unlock_irqrestore( + &phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + lpfc_release_scsi_buf_s4(phba, psb); + return; + } + } + spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); +} + +/** + * lpfc_sli4_repost_scsi_sgl_list - Repost the Scsi buffers sgl pages as block + * @phba: pointer to lpfc hba data structure. + * + * This routine walks the list of scsi buffers that have been allocated and + * reposts them to the HBA by using SGL block post. This is needed after a + * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine + * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list + * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. + * + * Returns: 0 = success, non-zero failure.
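lpfc_sli4_fcp_xri_aborted() above follows a common pattern for resolving a hardware event against a software list: take the lock, walk with the _safe iterator so the match can be unlinked mid-walk, and drop the lock before re-entering the release path. Stripped to its shape (lock, list, field and callback names here are illustrative):

	unsigned long flags;
	struct entry *e, *next;

	spin_lock_irqsave(&lock, flags);
	list_for_each_entry_safe(e, next, &pending_list, list) {
		if (e->tag == tag) {
			list_del(&e->list);
			spin_unlock_irqrestore(&lock, flags);
			recycle(e);	/* runs without the list lock held */
			return;
		}
	}
	spin_unlock_irqrestore(&lock, flags);

Dropping the lock before calling back into the release routine matters because that routine takes its own locks; holding both would invite lock-ordering problems.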
+ **/ +int +lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) +{ + struct lpfc_scsi_buf *psb; + int index, status, bcnt = 0, rcnt = 0, rc = 0; + LIST_HEAD(sblist); + + for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { + psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; + if (psb) { + /* Remove from SCSI buffer list */ + list_del(&psb->list); + /* Add it to a local SCSI buffer list */ + list_add_tail(&psb->list, &sblist); + if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { + bcnt = rcnt; + rcnt = 0; + } + } else + /* A hole present in the XRI array, need to skip */ + bcnt = rcnt; + + if (index == phba->sli4_hba.scsi_xri_cnt - 1) + /* End of XRI array for SCSI buffer, complete */ + bcnt = rcnt; + + /* Continue until we collect up to a nembed page worth of sgls */ + if (bcnt == 0) + continue; + /* Now, post the SCSI buffer list sgls as a block */ + status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); + /* Reset SCSI buffer count for next round of posting */ + bcnt = 0; + while (!list_empty(&sblist)) { + list_remove_head(&sblist, psb, struct lpfc_scsi_buf, + list); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + } } - psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; + return rc; +} - psb->fcp_cmnd = psb->data; - psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); - psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp); +/** + * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec + * @vport: The virtual port for which this call is being executed. + * @num_to_alloc: The requested number of buffers to allocate. + * + * This routine allocates a scsi buffer for device with SLI-4 interface spec; + * the scsi buffer contains all the necessary information needed to initiate + * a SCSI I/O. + * + * Return codes: + * int - number of scsi buffers that were allocated. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +static int +lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_scsi_buf *psb; + struct sli4_sge *sgl; + IOCB_t *iocb; + dma_addr_t pdma_phys_fcp_cmd; + dma_addr_t pdma_phys_fcp_rsp; + dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; + uint16_t iotag, last_xritag = NO_XRI; + int status = 0, index; + int bcnt; + int non_sequential_xri = 0; + int rc = 0; + LIST_HEAD(sblist); + + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { + psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); + if (!psb) + break; - /* Initialize local short-hand pointers. */ - bpl = psb->fcp_bpl; - pdma_phys_fcp_cmd = psb->dma_handle; - pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); - pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp); + /* + * Get memory from the pci pool to map the virt space to pci bus + * space for an I/O. The DMA buffer includes space for the + * struct fcp_cmnd, struct fcp_rsp and the number of bde's + * necessary to support the sg_tablesize. + */ + psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, + GFP_KERNEL, &psb->dma_handle); + if (!psb->data) { + kfree(psb); + break; + } - /* - * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg - * list bdes. Initialize the first two and leave the rest for - * queuecommand.
- */ - bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); - bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); - bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); - bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); - - /* Setup the physical region for the FCP RSP */ - bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); - bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); - bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); - bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); + /* Initialize virtual ptrs to dma_buf region. */ + memset(psb->data, 0, phba->cfg_sg_dma_buf_size); - /* - * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, - * initialize it with all known data now. - */ - iocb = &psb->cur_iocbq.iocb; - iocb->un.fcpi64.bdl.ulpIoTag32 = 0; - if ((phba->sli_rev == 3) && - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { - /* fill in immediate fcp command BDE */ - iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; + /* Allocate iotag for psb->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + kfree(psb); + break; + } + + psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); + if (psb->cur_iocbq.sli4_xritag == NO_XRI) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree(psb); + break; + } + if (last_xritag != NO_XRI + && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { + non_sequential_xri = 1; + } else + list_add_tail(&psb->list, &sblist); + last_xritag = psb->cur_iocbq.sli4_xritag; + + index = phba->sli4_hba.scsi_xri_cnt++; + psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; + + psb->fcp_bpl = psb->data; + psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) + - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + + sizeof(struct fcp_cmnd)); + + /* Initialize local short-hand pointers. */ + sgl = (struct sli4_sge *)psb->fcp_bpl; + pdma_phys_bpl = psb->dma_handle; + pdma_phys_fcp_cmd = + (psb->dma_handle + phba->cfg_sg_dma_buf_size) + - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); + + /* + * The first two bdes are the FCP_CMD and FCP_RSP. The balance + * are sg list bdes. Initialize the first two and leave the + * rest for queuecommand. + */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); + bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd)); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + sgl++; + + /* Setup the physical region for the FCP RSP */ + sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); + bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp)); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + + /* + * Since the IOCB for the FCP I/O is built into this + * lpfc_scsi_buf, initialize it with all known data now. + */ + iocb = &psb->cur_iocbq.iocb; + iocb->un.fcpi64.bdl.ulpIoTag32 = 0; + iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; + /* setting the BLP size to 2 * sizeof BDE may not be correct. + * We are setting the bpl to point to our sgl. An sgl's + * entries are 16 bytes, a bpl's entries are 12 bytes.
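The size concern in that comment can be pinned down at compile time with BUILD_BUG_ON(), the kernel's static assert (a hedged sketch; the expected sizes follow from the structure definitions as this patch describes them — three 32-bit words per BPL entry, four per SGE):

	BUILD_BUG_ON(sizeof(struct ulp_bde64) != 12);	/* BPL entry  */
	BUILD_BUG_ON(sizeof(struct sli4_sge) != 16);	/* SLI-4 SGE  */

so a bdeSize computed as 2 * sizeof(struct ulp_bde64) (24 bytes) would undersell two 16-byte SGEs (32 bytes), which is exactly the mismatch the comment flags.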
+ */ iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); - iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, - unsli3.fcp_ext.icd); - iocb->un.fcpi64.bdl.addrHigh = 0; - iocb->ulpBdeCount = 0; - iocb->ulpLe = 0; - /* fill in responce BDE */ - iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; - iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = - sizeof(struct fcp_rsp); - iocb->unsli3.fcp_ext.rbde.addrLow = - putPaddrLow(pdma_phys_fcp_rsp); - iocb->unsli3.fcp_ext.rbde.addrHigh = - putPaddrHigh(pdma_phys_fcp_rsp); - } else { - iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64)); - iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl); - iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl); + iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); + iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); iocb->ulpBdeCount = 1; iocb->ulpLe = 1; + iocb->ulpClass = CLASS3; + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + psb->dma_phys_bpl = pdma_phys_bpl; + phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; + if (non_sequential_xri) { + status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, + pdma_phys_bpl1, + psb->cur_iocbq.sli4_xritag); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + break; + } + } + if (bcnt) { + status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); + /* Reset SCSI buffer count for next round of posting */ + while (!list_empty(&sblist)) { + list_remove_head(&sblist, psb, struct lpfc_scsi_buf, + list); + if (status) { + /* Put this back on the abort scsi list */ + psb->status = IOSTAT_LOCAL_REJECT; + psb->result = IOERR_ABORT_REQUESTED; + rc++; + } else + psb->status = IOSTAT_SUCCESS; + /* Put it back into the SCSI buffer list */ + lpfc_release_scsi_buf_s4(phba, psb); + } } - iocb->ulpClass = CLASS3; - return psb; + return bcnt + non_sequential_xri - rc; } /** - * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba - * @phba: The Hba for which this call is being executed. + * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator + * @vport: The virtual port for which this call is being executed. + * @num_to_alloc: The requested number of buffers to allocate. + * + * This routine wraps the actual SCSI buffer allocator function pointer from + * the lpfc_hba struct. + * + * Return codes: + * int - number of scsi buffers that were allocated. + * 0 = failure, less than num_to_alloc is a partial failure. + **/ +static inline int +lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc) +{ + return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc); +} + +/** + * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA + * @phba: The HBA for which this call is being executed. * * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list * and returns to caller. @@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) } /** - * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list + * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is being released.
* @@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba) * lpfc_scsi_buf_list list. **/ static void -lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; @@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) } /** - * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer + * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list. + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. + * + * This routine releases @psb scsi buffer by adding it to tail of @phba + * lpfc_scsi_buf_list list. For SLI4, XRIs are tied to the scsi buffer + * and cannot be reused for at least RA_TOV amount of time if it was + * aborted. + **/ +static void +lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + unsigned long iflag = 0; + + if (psb->status == IOSTAT_LOCAL_REJECT + && psb->result == IOERR_ABORT_REQUESTED) { + spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + psb->pCmd = NULL; + list_add_tail(&psb->list, + &phba->sli4_hba.lpfc_abts_scsi_buf_list); + spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, + iflag); + } else { + + spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->pCmd = NULL; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + } +} + +/** + * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list. + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is being released. + * + * This routine releases @psb scsi buffer by adding it to tail of @phba + * lpfc_scsi_buf_list list. + **/ +static void +lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + + phba->lpfc_release_scsi_buf(phba, psb); +} + +/** + * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. * @lpfc_cmd: The scsi buffer which is going to be mapped. * * This routine does the pci dma mapping for scatter-gather list of scsi cmnd - * field of @lpfc_cmd. This routine scans through sg elements and format the - * bdea. This routine also initializes all IOCB fields which are dependent on - * scsi command request buffer. + * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans + * through sg elements and formats the bdes. This routine also initializes all + * IOCB fields which are dependent on scsi command request buffer. * * Return codes: * 1 - Error * 0 - Success **/ static int -lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; @@ -1412,6 +1759,133 @@ out: } /** + * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine does the pci dma mapping for scatter-gather list of scsi cmnd + * field of @lpfc_cmd for device with SLI-4 interface spec.
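The release-side policy that lpfc_release_scsi_buf_s4() implements reduces to a two-way route keyed off the completion status; in outline (a sketch — move_to() stands for the lock-protected list_add_tail() pairs in the real routine, and the list names abbreviate the sli4_hba fields shown above):

	/* Sketch: aborted XRIs are quarantined, everything else is freed */
	if (psb->status == IOSTAT_LOCAL_REJECT &&
	    psb->result == IOERR_ABORT_REQUESTED)
		move_to(psb, &abts_list);	/* wait out the XRI quarantine */
	else
		move_to(psb, &free_list);	/* immediately reusable */

Clearing psb->pCmd in both arms severs the buffer from the completed scsi_cmnd before the buffer becomes visible on either list, so a racing allocator can never see a stale command pointer.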
+ * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static int +lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; + struct scatterlist *sgel = NULL; + struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; + struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; + dma_addr_t physaddr; + uint32_t num_bde = 0; + uint32_t dma_len; + uint32_t dma_offset = 0; + int nseg; + + /* + * There are three possibilities here - use scatter-gather segment, use + * the single mapping, or neither. Start the lpfc command prep by + * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first + * data bde entry. + */ + if (scsi_sg_count(scsi_cmnd)) { + /* + * The driver stores the segment count returned from pci_map_sg + * because this is a count of dma-mappings used to map the use_sg + * pages. They are not guaranteed to be the same for those + * architectures that implement an IOMMU. + */ + + nseg = scsi_dma_map(scsi_cmnd); + if (unlikely(!nseg)) + return 1; + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl += 1; + + lpfc_cmd->seg_cnt = nseg; + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + printk(KERN_ERR "%s: Too many sg segments from " + "dma_map_sg. Config %d, seg_cnt %d\n", + __func__, phba->cfg_sg_seg_cnt, + lpfc_cmd->seg_cnt); + scsi_dma_unmap(scsi_cmnd); + return 1; + } + + /* + * The driver established a maximum scatter-gather segment count + * during probe that limits the number of sg elements in any + * single scsi command. Just run through the seg_cnt and format + * the sge's. + * When using SLI-3 the driver will try to fit all the BDEs into + * the IOCB. If it can't then the BDEs get added to a BPL as it + * does for SLI-2 mode. + */ + scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { + physaddr = sg_dma_address(sgel); + dma_len = sg_dma_len(sgel); + bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel)); + sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); + if ((num_bde + 1) == nseg) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + dma_offset += dma_len; + sgl++; + } + } else { + sgl += 1; + /* clear the last flag in the fcp_rsp map entry */ + sgl->word2 = le32_to_cpu(sgl->word2); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + } + + /* + * Finish initializing those IOCB fields that are dependent on the + * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is + * explicitly reinitialized; + * all iocb memory resources are reused. + */ + fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); + + /* + * Due to difference in data length between DIF/non-DIF paths, + * we need to set word 4 of IOCB here + */ + iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); + return 0; +} + +/** + * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be mapped. + * + * This routine wraps the actual DMA mapping function pointer from the + * lpfc_hba struct.
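Both prep_dma_buf variants share the same outer shape around the scsi_dma_map()/scsi_for_each_sg() API; a hedged, minimal skeleton of that contract (per-entry formatting omitted, error policy illustrative):

	struct scatterlist *sg;
	int i, nseg;

	nseg = scsi_dma_map(cmd);	/* segment count, or < 0 on failure */
	if (nseg <= 0)
		return nseg ? 1 : 0;
	scsi_for_each_sg(cmd, sg, nseg, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);
		/* ...format one hardware s/g entry per mapped segment... */
	}
	/* on any later error, undo with scsi_dma_unmap(cmd) */

The mapped segment count can differ from scsi_sg_count() on IOMMU platforms — adjacent pages may coalesce into fewer DMA segments — which is why the driver stores nseg rather than the original sg count.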
+ * + * Return codes: + * 1 - Error + * 0 - Success + **/ +static inline int +lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); +} + +/** * lpfc_send_scsi_error_event - Posts an event when there is SCSI error * @phba: Pointer to hba context object. * @vport: Pointer to vport object. @@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, } /** - * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather - * @phba: The Hba for which this call is being executed. + * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev + * @phba: The HBA for which this call is being executed. * @psb: The scsi buffer which is going to be un-mapped. * * This routine does DMA un-mapping of scatter gather list of scsi command - * field of @lpfc_cmd. + * field of @lpfc_cmd for device with SLI-3 interface spec. **/ static void -lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) +lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { /* * There are only two special cases to consider. (1) the scsi command @@ -1529,6 +2003,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) } /** + * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is going to be un-mapped. + * + * This routine does DMA un-mapping of scatter gather list of scsi command + * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to + * remove the sgl for this scsi buffer then we will do it here. For now + * we should be able to just call the sli3 unprep routine. + **/ +static void +lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + lpfc_scsi_unprep_dma_buf_s3(phba, psb); +} + +/** + * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list + * @phba: The Hba for which this call is being executed. + * @psb: The scsi buffer which is going to be un-mapped. + * + * This routine does DMA un-mapping of scatter gather list of scsi command + * field of @lpfc_cmd through the function pointer configured for the + * device's interface spec. + **/ +static void +lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +{ + phba->lpfc_scsi_unprep_dma_buf(phba, psb); +} + +/** * lpfc_handler_fcp_err - FCP response handler * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. @@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine * @phba: The Hba for which this call is being executed. * @pIocbIn: The command IOCBQ for the scsi cmnd. - * @pIocbOut: The response IOCBQ for the scsi cmnd . + * @pIocbOut: The response IOCBQ for the scsi cmnd. * * This routine assigns scsi command result by looking into response IOCB * status field appropriately. This routine handles QUEUE FULL condition as @@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) } /** - * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit + * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: The scsi command which needs to send. * @pnode: Pointer to lpfc_nodelist.
* * This routine initializes fcp_cmnd and iocb data structure from scsi command - * to transfer. + * to transfer for device with SLI3 interface spec. **/ static void -lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, +lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_nodelist *pnode) { struct lpfc_hba *phba = vport->phba; @@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; - iocb_cmd->un.fcpi.fcpi_parm = 0; - iocb_cmd->ulpPU = 0; + if (phba->sli_rev < LPFC_SLI_REV4) { + iocb_cmd->un.fcpi.fcpi_parm = 0; + iocb_cmd->ulpPU = 0; + } else + iocb_cmd->ulpPU = PARM_READ_CHECK; fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; } else { @@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } /** - * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit + * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: The scsi command which needs to be sent. + * @pnode: Pointer to lpfc_nodelist. + * + * This routine initializes fcp_cmnd and iocb data structure from scsi command + * to transfer for device with SLI4 interface spec. + **/ +static void +lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_nodelist *pnode) +{ + /* + * The prep cmnd routines do not touch the sgl or its + * entries. We may not have to do anything different. + * I will leave this function in place until we can + * run some IO through the driver and determine if changes + * are needed. + */ + return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode); +} + +/** + * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: The scsi command which needs to be sent. + * @pnode: Pointer to lpfc_nodelist. + * + * This routine wraps the actual convert SCSI cmnd function pointer from + * the lpfc_hba struct. + **/ +static inline void +lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, + struct lpfc_nodelist *pnode) +{ + vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode); +} + +/** + * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lun: Logical unit number. + * @task_mgmt_cmd: SCSI task management command. + * - * This routine creates FCP information unit corresponding to @task_mgmt_cmd. + * This routine creates FCP information unit corresponding to @task_mgmt_cmd + * for device with SLI-3 interface spec. * * Return codes: * 0 - Error * 1 - Success **/ static int -lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, +lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, unsigned int lun, uint8_t task_mgmt_cmd) @@ -2114,6 +2661,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, } /** + * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lun: Logical unit number. + * @task_mgmt_cmd: SCSI task management command.
+ * + * This routine creates FCP information unit corresponding to @task_mgmt_cmd + * for device with SLI-4 interface spec. + * + * Return codes: + * 0 - Error + * 1 - Success + **/ +static int +lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, + struct lpfc_scsi_buf *lpfc_cmd, + unsigned int lun, + uint8_t task_mgmt_cmd) +{ + /* + * The prep cmnd routines do not touch the sgl or its + * entries. We may not have to do anything different. + * I will leave this function in place until we can + * run some IO through the driver and determine if changes + * are needed. + */ + return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun, + task_mgmt_cmd); +} + +/** + * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * @lun: Logical unit number. + * @task_mgmt_cmd: SCSI task management command. + * + * This routine wraps the actual convert SCSI TM to FCP information unit + * function pointer from the lpfc_hba struct. + * + * Return codes: + * 0 - Error + * 1 - Success + **/ +static inline int +lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, + struct lpfc_scsi_buf *lpfc_cmd, + unsigned int lun, + uint8_t task_mgmt_cmd) +{ + struct lpfc_hba *phba = vport->phba; + + return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, + task_mgmt_cmd); +} + +/** + * lpfc_scsi_api_table_setup - Set up scsi api function jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the SCSI interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; + phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; + phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3; + phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3; + phba->lpfc_scsi_prep_task_mgmt_cmd = + lpfc_scsi_prep_task_mgmt_cmd_s3; + phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; + break; + case LPFC_PCI_DEV_OC: + phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; + phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; + phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4; + phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4; + phba->lpfc_scsi_prep_task_mgmt_cmd = + lpfc_scsi_prep_task_mgmt_cmd_s4; + phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1418 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + break; + } + phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; + phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; + return 0; +} + +/** + * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command + * @phba: The Hba for which this call is being executed. + * @cmdiocbq: Pointer to lpfc_iocbq data structure.
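lpfc_scsi_api_table_setup() above is a hand-rolled virtual dispatch: the per-revision implementations are chosen once at probe time, and every hot-path call goes through the stored pointer. The same idea is often expressed with a static const ops structure; a hedged alternative sketch (the driver itself stores individual pointers in lpfc_hba rather than an ops struct):

struct scsi_api_ops {
	int  (*new_buf)(struct lpfc_vport *, int);
	int  (*prep_dma_buf)(struct lpfc_hba *, struct lpfc_scsi_buf *);
	void (*release_buf)(struct lpfc_hba *, struct lpfc_scsi_buf *);
};

static const struct scsi_api_ops sli3_scsi_ops = {
	.new_buf      = lpfc_new_scsi_buf_s3,
	.prep_dma_buf = lpfc_scsi_prep_dma_buf_s3,
	.release_buf  = lpfc_release_scsi_buf_s3,
};

static const struct scsi_api_ops sli4_scsi_ops = {
	.new_buf      = lpfc_new_scsi_buf_s4,
	.prep_dma_buf = lpfc_scsi_prep_dma_buf_s4,
	.release_buf  = lpfc_release_scsi_buf_s4,
};

A const ops table keeps the pointers in read-only data; the per-field assignment used in this patch keeps the lpfc_hba layout self-contained and avoids one extra pointer dereference per call.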
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); - status = lpfc_sli_issue_iocb_wait(phba, - &phba->sli.ring[phba->sli.fcp_ring], - iocbq, iocbqrsp, lpfc_cmd->timeout); + status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, + iocbq, iocbqrsp, lpfc_cmd->timeout); if (status != IOCB_SUCCESS) { if (status == IOCB_TIMEDOUT) { iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; @@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_sli *psli = &phba->sli; struct lpfc_rport_data *rdata = cmnd->device->hostdata; struct lpfc_nodelist *ndlp = rdata->pnode; struct lpfc_scsi_buf *lpfc_cmd; @@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); atomic_inc(&ndlp->cmd_pending); - err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], + err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); if (err) { atomic_dec(&ndlp->cmd_pending); @@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; struct lpfc_iocbq *iocb; struct lpfc_iocbq *abtsiocb; struct lpfc_scsi_buf *lpfc_cmd; @@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) icmd = &abtsiocb->iocb; icmd->un.acxri.abortType = ABORT_TYPE_ABTS; icmd->un.acxri.abortContextTag = cmd->ulpContext; - icmd->un.acxri.abortIoTag = cmd->ulpIoTag; + if (phba->sli_rev == LPFC_SLI_REV4) + icmd->un.acxri.abortIoTag = iocb->sli4_xritag; + else + icmd->un.acxri.abortIoTag = cmd->ulpIoTag; icmd->ulpLe = 1; icmd->ulpClass = cmd->ulpClass; @@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; - if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == + IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; @@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) "0703 Issue target reset to TGT %d LUN %d " "rpi x%x nlp_flag x%x\n", cmnd->device->id, cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); - status = lpfc_sli_issue_iocb_wait(phba, - &phba->sli.ring[phba->sli.fcp_ring], + status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, iocbq, iocbqrsp, lpfc_cmd->timeout); if (status == IOCB_TIMEDOUT) { iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; @@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; - struct lpfc_scsi_buf *scsi_buf = NULL; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - uint32_t total = 0, i; + uint32_t total = 0; uint32_t num_to_alloc = 0; - unsigned long flags; + int num_allocated = 0; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; @@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev) (phba->cfg_hba_queue_depth - total)); num_to_alloc = 
phba->cfg_hba_queue_depth - total; } - - for (i = 0; i < num_to_alloc; i++) { - scsi_buf = lpfc_new_scsi_buf(vport); - if (!scsi_buf) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0706 Failed to allocate " - "command buffer\n"); - break; - } - - spin_lock_irqsave(&phba->scsi_buf_list_lock, flags); - phba->total_scsi_bufs++; - list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags); + num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); + if (num_to_alloc != num_allocated) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); } return 0; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index c7c440d5fa29..65dfc8bd5b49 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -140,6 +140,8 @@ struct lpfc_scsi_buf { struct fcp_rsp *fcp_rsp; struct ulp_bde64 *fcp_bpl; + dma_addr_t dma_phys_bpl; + /* cur_iocbq has phys of the dma-able buffer. * Iotag is in here */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index eb5c75c45ba4..ff04daf18f48 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2008 Emulex. All rights reserved. * + * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -29,9 +29,12 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fs.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -40,24 +43,7 @@ #include "lpfc_logmsg.h" #include "lpfc_compat.h" #include "lpfc_debugfs.h" - -/* - * Define macro to log: Mailbox command x%x cannot issue Data - * This allows multiple uses of lpfc_msgBlk0311 - * w/o perturbing log msg utility. - */ -#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \ - lpfc_printf_log(phba, \ - KERN_INFO, \ - LOG_MBOX | LOG_SLI, \ - "(%d):0311 Mailbox command x%x cannot " \ - "issue Data: x%x x%x x%x\n", \ - pmbox->vport ? pmbox->vport->vpi : 0, \ - pmbox->mb.mbxCommand, \ - phba->pport->port_state, \ - psli->sli_flag, \ - flag) - +#include "lpfc_vport.h" /* There are only four IOCB completion types. */ typedef enum _lpfc_iocb_type { @@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type { LPFC_ABORT_IOCB } lpfc_iocb_type; + +/* Provide function prototypes local to this module. */ +static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, + uint32_t); +static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, + uint8_t *, uint32_t *); + +static IOCB_t * +lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) +{ + return &iocbq->iocb; +} + +/** + * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue + * @q: The Work Queue to operate on. + * @wqe: The Work Queue Entry to put on the Work queue. + * + * This routine will copy the contents of @wqe to the next available entry on + * the @q. This function will then ring the Work Queue Doorbell to signal the + * HBA to start processing the Work Queue Entry. This function returns 0 if + * successful.
If no entries are available on @q then this function will return + * -ENOMEM. + * The caller is expected to hold the hbalock when calling this routine. + **/ +static uint32_t +lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) +{ + union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe; + struct lpfc_register doorbell; + uint32_t host_index; + + /* If the host has not yet processed the next entry then we are done */ + if (((q->host_index + 1) % q->entry_count) == q->hba_index) + return -ENOMEM; + /* set consumption flag every once in a while */ + if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) + bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1); + + lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); + + /* Update the host index before invoking device */ + host_index = q->host_index; + q->host_index = ((q->host_index + 1) % q->entry_count); + + /* Ring Doorbell */ + doorbell.word0 = 0; + bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1); + bf_set(lpfc_wq_doorbell_index, &doorbell, host_index); + bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr); + readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */ + + return 0; +} + +/** + * lpfc_sli4_wq_release - Updates internal hba index for WQ + * @q: The Work Queue to operate on. + * @index: The index to advance the hba index to. + * + * This routine will update the HBA index of a queue to reflect consumption of + * Work Queue Entries by the HBA. When the HBA indicates that it has consumed + * an entry the host calls this function to update the queue's internal + * pointers. This routine returns the number of entries that were consumed by + * the HBA. + **/ +static uint32_t +lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) +{ + uint32_t released = 0; + + if (q->hba_index == index) + return 0; + do { + q->hba_index = ((q->hba_index + 1) % q->entry_count); + released++; + } while (q->hba_index != index); + return released; +} + +/** + * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue + * @q: The Mailbox Queue to operate on. + * @mqe: The Mailbox Queue Entry to put on the Mailbox queue. + * + * This routine will copy the contents of @mqe to the next available entry on + * the @q. This function will then ring the Mailbox Queue Doorbell to signal the + * HBA to start processing the Mailbox Queue Entry. This function returns 0 if + * successful. If no entries are available on @q then this function will return + * -ENOMEM. + * The caller is expected to hold the hbalock when calling this routine.
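The wq/mq put routines above and the eq/cq routines below all share the same circular-index arithmetic: the producer may never advance onto the consumer's slot, so one entry is permanently sacrificed to distinguish full from empty without a separate count. In isolation (a sketch):

static inline int queue_is_full(uint32_t host_index, uint32_t hba_index,
				uint32_t entry_count)
{
	/* full when advancing the producer would land on the consumer */
	return ((host_index + 1) % entry_count) == hba_index;
}

static inline uint32_t queue_advance(uint32_t index, uint32_t entry_count)
{
	return (index + 1) % entry_count;	/* wrap at entry_count */
}

The readl() after each doorbell writel() is a posted-write flush: it forces the write out of any PCI bridge buffering before the host proceeds.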
+ **/ +static uint32_t +lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) +{ + struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe; + struct lpfc_register doorbell; + uint32_t host_index; + + /* If the host has not yet processed the next entry then we are done */ + if (((q->host_index + 1) % q->entry_count) == q->hba_index) + return -ENOMEM; + lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size); + /* Save off the mailbox pointer for completion */ + q->phba->mbox = (MAILBOX_t *)temp_mqe; + + /* Update the host index before invoking device */ + host_index = q->host_index; + q->host_index = ((q->host_index + 1) % q->entry_count); + + /* Ring Doorbell */ + doorbell.word0 = 0; + bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); + bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); + readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */ + return 0; +} + +/** + * lpfc_sli4_mq_release - Updates internal hba index for MQ + * @q: The Mailbox Queue to operate on. + * + * This routine will update the HBA index of a queue to reflect consumption of + * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed + * an entry the host calls this function to update the queue's internal + * pointers. This routine returns the number of entries that were consumed by + * the HBA. + **/ +static uint32_t +lpfc_sli4_mq_release(struct lpfc_queue *q) +{ + /* Clear the mailbox pointer for completion */ + q->phba->mbox = NULL; + q->hba_index = ((q->hba_index + 1) % q->entry_count); + return 1; +} + +/** + * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ + * @q: The Event Queue to get the first valid EQE from + * + * This routine will get the first valid Event Queue Entry from @q, update + * the queue's internal hba index, and return the EQE. If no valid EQEs are in + * the Queue (no more work to do), or the Queue is full of EQEs that have been + * processed, but not popped back to the HBA then this routine will return NULL. + **/ +static struct lpfc_eqe * +lpfc_sli4_eq_get(struct lpfc_queue *q) +{ + struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe; + + /* If the next EQE is not valid then we are done */ + if (!bf_get(lpfc_eqe_valid, eqe)) + return NULL; + /* If the host has not yet processed the next entry then we are done */ + if (((q->hba_index + 1) % q->entry_count) == q->host_index) + return NULL; + + q->hba_index = ((q->hba_index + 1) % q->entry_count); + return eqe; +} + +/** + * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ + * @q: The Event Queue that the host has completed processing for. + * @arm: Indicates whether the host wants to arms this CQ. + * + * This routine will mark all Event Queue Entries on @q, from the last + * known completed entry to the last entry that was processed, as completed + * by clearing the valid bit for each completion queue entry. Then it will + * notify the HBA, by ringing the doorbell, that the EQEs have been processed. + * The internal host index in the @q will be updated by this routine to indicate + * that the host has finished processing the entries. The @arm parameter + * indicates that the queue should be rearmed when ringing the doorbell. + * + * This function will return the number of EQEs that were popped. 
+ **/ +uint32_t +lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) +{ + uint32_t released = 0; + struct lpfc_eqe *temp_eqe; + struct lpfc_register doorbell; + + /* while there are valid entries */ + while (q->hba_index != q->host_index) { + temp_eqe = q->qe[q->host_index].eqe; + bf_set(lpfc_eqe_valid, temp_eqe, 0); + released++; + q->host_index = ((q->host_index + 1) % q->entry_count); + } + if (unlikely(released == 0 && !arm)) + return 0; + + /* ring doorbell for number popped */ + doorbell.word0 = 0; + if (arm) { + bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); + bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); + } + bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); + bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); + bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); + return released; +} + +/** + * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ + * @q: The Completion Queue to get the first valid CQE from + * + * This routine will get the first valid Completion Queue Entry from @q, update + * the queue's internal hba index, and return the CQE. If no valid CQEs are in + * the Queue (no more work to do), or the Queue is full of CQEs that have been + * processed, but not popped back to the HBA then this routine will return NULL. + **/ +static struct lpfc_cqe * +lpfc_sli4_cq_get(struct lpfc_queue *q) +{ + struct lpfc_cqe *cqe; + + /* If the next CQE is not valid then we are done */ + if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) + return NULL; + /* If the host has not yet processed the next entry then we are done */ + if (((q->hba_index + 1) % q->entry_count) == q->host_index) + return NULL; + + cqe = q->qe[q->hba_index].cqe; + q->hba_index = ((q->hba_index + 1) % q->entry_count); + return cqe; +} + +/** + * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ + * @q: The Completion Queue that the host has completed processing for. + * @arm: Indicates whether the host wants to arms this CQ. + * + * This routine will mark all Completion queue entries on @q, from the last + * known completed entry to the last entry that was processed, as completed + * by clearing the valid bit for each completion queue entry. Then it will + * notify the HBA, by ringing the doorbell, that the CQEs have been processed. + * The internal host index in the @q will be updated by this routine to indicate + * that the host has finished processing the entries. The @arm parameter + * indicates that the queue should be rearmed when ringing the doorbell. + * + * This function will return the number of CQEs that were released. 
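The EQ release path described above batches its work: it invalidates every consumed entry first, then rings the doorbell once for the whole batch, optionally rearming the interrupt. A stand-alone sketch of that pattern follows; the register layout and names are invented for illustration:

#include <stdint.h>

struct cqe { uint32_t valid; };

struct cq {
	struct cqe *entries;
	uint32_t host_index;		/* consumer (host) index */
	uint32_t hba_index;		/* producer (device) index */
	uint32_t entry_count;
	volatile uint32_t *doorbell;	/* stand-in for the mapped register */
};

#define DB_ARM_BIT (1u << 31)		/* illustrative bit layout */

/* Invalidate every consumed entry, then notify the device exactly once;
 * 'arm' asks the device to raise an interrupt on the next completion. */
static uint32_t cq_release(struct cq *q, int arm)
{
	uint32_t released = 0;

	while (q->host_index != q->hba_index) {
		q->entries[q->host_index].valid = 0;
		q->host_index = (q->host_index + 1) % q->entry_count;
		released++;
	}
	if (released == 0 && !arm)
		return 0;
	*q->doorbell = released | (arm ? DB_ARM_BIT : 0);
	return released;
}

One doorbell write per batch, rather than one per entry, is what keeps the MMIO cost of completion processing bounded.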
+ **/ +uint32_t +lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) +{ + uint32_t released = 0; + struct lpfc_cqe *temp_qe; + struct lpfc_register doorbell; + + /* while there are valid entries */ + while (q->hba_index != q->host_index) { + temp_qe = q->qe[q->host_index].cqe; + bf_set(lpfc_cqe_valid, temp_qe, 0); + released++; + q->host_index = ((q->host_index + 1) % q->entry_count); + } + if (unlikely(released == 0 && !arm)) + return 0; + + /* ring doorbell for number popped */ + doorbell.word0 = 0; + if (arm) + bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); + bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); + bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); + bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id); + writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); + return released; +} + +/** + * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue + * @q: The Header Receive Queue to operate on. + * @wqe: The Receive Queue Entry to put on the Receive queue. + * + * This routine will copy the contents of @wqe to the next available entry on + * the @q. This function will then ring the Receive Queue Doorbell to signal the + * HBA to start processing the Receive Queue Entry. This function returns the + * index that the rqe was copied to if successful. If no entries are available + * on @q then this function will return -ENOMEM. + * The caller is expected to hold the hbalock when calling this routine. + **/ +static int +lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, + struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) +{ + struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe; + struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe; + struct lpfc_register doorbell; + int put_index = hq->host_index; + + if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) + return -EINVAL; + if (hq->host_index != dq->host_index) + return -EINVAL; + /* If the host has not yet processed the next entry then we are done */ + if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) + return -EBUSY; + lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); + lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); + + /* Update the host index to point to the next slot */ + hq->host_index = ((hq->host_index + 1) % hq->entry_count); + dq->host_index = ((dq->host_index + 1) % dq->entry_count); + + /* Ring The Header Receive Queue Doorbell */ + if (!(hq->host_index % LPFC_RQ_POST_BATCH)) { + doorbell.word0 = 0; + bf_set(lpfc_rq_doorbell_num_posted, &doorbell, + LPFC_RQ_POST_BATCH); + bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id); + writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr); + } + return put_index; +} + +/** + * lpfc_sli4_rq_release - Updates internal hba index for RQ + * @q: The Header Receive Queue to operate on. + * + * This routine will update the HBA index of a queue to reflect consumption of + * one Receive Queue Entry by the HBA. When the HBA indicates that it has + * consumed an entry the host calls this function to update the queue's + * internal pointers. This routine returns the number of entries that were + * consumed by the HBA. 
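The receive path above posts to two queues in lock step (header and data) and defers the doorbell until a batch is ready. A simplified sketch of that scheme; the batch size and names are illustrative:

#include <stdint.h>

#define RQ_POST_BATCH 8			/* illustrative batch size */

struct rq { uint32_t host_index, hba_index, entry_count; };

/* Post one header/data pair; the two queues must advance in lock step so
 * entry N of the header queue always describes entry N of the data queue.
 * The doorbell is rung once per RQ_POST_BATCH posts, not on every post. */
static int rq_post_pair(struct rq *hq, struct rq *dq, volatile uint32_t *db)
{
	int put_index = (int)hq->host_index;

	if (hq->host_index != dq->host_index)
		return -1;			/* queues out of sync */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -2;			/* no room */
	hq->host_index = (hq->host_index + 1) % hq->entry_count;
	dq->host_index = (dq->host_index + 1) % dq->entry_count;
	if (!(hq->host_index % RQ_POST_BATCH))
		*db = RQ_POST_BATCH;		/* announce a full batch */
	return put_index;
}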
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+		return 0;
+	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+	return 1;
+}
+
 /**
  * lpfc_cmd_iocb - Get next command iocb entry in the ring
  * @phba: Pointer to HBA context object.
@@ -121,6 +451,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 }
 
 /**
+ * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function clears the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+	uint16_t adj_xri;
+	struct lpfc_sglq *sglq;
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
+		return NULL;
+	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+	return sglq;
+}
+
+/**
+ * __lpfc_get_active_sglq - Get the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function returns the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns sglq pointer = success, NULL = Failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+	uint16_t adj_xri;
+	struct lpfc_sglq *sglq;
+	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
+		return NULL;
+	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+	return sglq;
+}
+
+/**
+ * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with hbalock held. This function
+ * gets a new driver sglq object from the sglq list. If the
+ * list is not empty, it returns a pointer to the newly
+ * allocated sglq object; else it returns NULL.
+ **/
+static struct lpfc_sglq *
+__lpfc_sli_get_sglq(struct lpfc_hba *phba)
+{
+	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
+	struct lpfc_sglq *sglq = NULL;
+	uint16_t adj_xri;
+	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
+	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+	return sglq;
+}
+
+/**
  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
  * @phba: Pointer to HBA context object.
  *
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 }
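The sglq helpers above all reduce to one idea: the hardware tag, rebased by xri_base, is a direct index into a host-side ownership array. A generic sketch of that lookup-and-clear idiom; the types and names are placeholders, not the lpfc structures:

#include <stdint.h>
#include <stddef.h>

struct sgl;				/* opaque scatter-gather list */

struct sgl_table {
	struct sgl **active;		/* indexed by rebased tag */
	uint16_t tag_base;		/* first tag owned by this port */
	uint16_t max_tag;		/* highest valid rebased tag */
};

/* O(1) lookup-and-clear: rebase the hardware tag into the array, bounds
 * check it, and take ownership of the entry by NULLing the slot. */
static struct sgl *table_clear_active(struct sgl_table *t, uint16_t tag)
{
	uint16_t adj = tag - t->tag_base;
	struct sgl *s;

	if (adj > t->max_tag)
		return NULL;
	s = t->active[adj];
	t->active[adj] = NULL;
	return s;
}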
 
 /**
- * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to driver iocb object.
  *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
  * iocb object to the iocb pool. The iotag in the iocb object
  * does not change for each use of the iocb object. This function
  * clears all other fields of the iocb object when it is freed.
+ * The sglq structure that holds the xritag and phys and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indicates that
+ * this IO was aborted then the sglq entry is put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_sgl_list).
  **/
 static void
-__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	struct lpfc_sglq *sglq;
+	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+	unsigned long iflag;
+
+	if (iocbq->sli4_xritag == NO_XRI)
+		sglq = NULL;
+	else
+		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+	if (sglq) {
+		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
+			|| ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+			&& (iocbq->iocb.un.ulpWord[4]
+				== IOERR_SLI_ABORTED))) {
+			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
+					iflag);
+			list_add(&sglq->list,
+				&phba->sli4_hba.lpfc_abts_els_sgl_list);
+			spin_unlock_irqrestore(
+				&phba->sli4_hba.abts_sgl_list_lock, iflag);
+		} else
+			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+	}
+
+
+	/*
+	 * Clean all volatile data fields, preserve iotag and node struct.
+	 */
+	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_xritag = NO_XRI;
+	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
+/**
+ * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
 
@@ -160,10 +613,27 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	 * Clean all volatile data fields, preserve iotag and node struct.
 	 */
 	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+	iocbq->sli4_xritag = NO_XRI;
 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 }
 
 /**
+ * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+	phba->__lpfc_sli_release_iocbq(phba, iocbq);
+}
+
+/**
  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to driver iocb object.
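The _s3/_s4 split plus the thin wrapper above is the patch's API jump-table pattern: revision-specific routines are bound to function pointers once at setup, and hot paths dispatch through them with no per-call revision checks. A minimal sketch of the idea, with simplified types and illustrative names:

struct hba {
	int sli_rev;
	void (*release_iocbq)(struct hba *hba, void *iocbq);
};

static void release_iocbq_rev3(struct hba *hba, void *iocbq)
{
	/* rev-3 specific teardown would live here */
	(void)hba; (void)iocbq;
}

static void release_iocbq_rev4(struct hba *hba, void *iocbq)
{
	/* rev-4 specific teardown (e.g. returning the SGL) would live here */
	(void)hba; (void)iocbq;
}

/* Bound once at setup; callers then make a single indirect call. */
static void hba_bind_api(struct hba *hba)
{
	hba->release_iocbq = (hba->sli_rev == 4) ? release_iocbq_rev4
						 : release_iocbq_rev3;
}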
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case CMD_GEN_REQUEST64_CR: case CMD_GEN_REQUEST64_CX: case CMD_XMIT_ELS_RSP64_CX: + case DSSCMD_IWRITE64_CR: + case DSSCMD_IWRITE64_CX: + case DSSCMD_IREAD64_CR: + case DSSCMD_IREAD64_CX: + case DSSCMD_INVALIDATE_DEK: + case DSSCMD_SET_KEK: + case DSSCMD_GET_KEK_ID: + case DSSCMD_GEN_XFER: type = LPFC_SOL_IOCB; break; case CMD_ABORT_XRI_CN: @@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba) pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) return -ENOMEM; - pmbox = &pmb->mb; + pmbox = &pmb->u.mb; phba->link_state = LPFC_INIT_MBX_CMDS; for (i = 0; i < psli->num_rings; i++) { lpfc_config_ring(phba, i, pmb); @@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) phba->hbqs[i].buffer_count = 0; } /* Return all HBQ buffer that are in-fly */ - list_for_each_entry_safe(dmabuf, next_dmabuf, - &phba->hbqbuf_in_list, list) { + list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, + list) { hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); list_del(&hbq_buf->dbuf.list); if (hbq_buf->tag == -1) { @@ -814,10 +1292,28 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) * pointer to the hbq entry if it successfully post the buffer * else it will return NULL. **/ -static struct lpfc_hbq_entry * +static int lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, struct hbq_dmabuf *hbq_buf) { + return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); +} + +/** + * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * @hbq_buf: Pointer to HBQ buffer. + * + * This function is called with the hbalock held to post a hbq buffer to the + * firmware. If the function finds an empty slot in the HBQ, it will post the + * buffer and place it on the hbq_buffer_list. The function will return zero if + * it successfully post the buffer else it will return an error. + **/ +static int +lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, + struct hbq_dmabuf *hbq_buf) +{ struct lpfc_hbq_entry *hbqe; dma_addr_t physaddr = hbq_buf->dbuf.phys; @@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, /* flush */ readl(phba->hbq_put + hbqno); list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); - } - return hbqe; + return 0; + } else + return -ENOMEM; +} + +/** + * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * @hbq_buf: Pointer to HBQ buffer. + * + * This function is called with the hbalock held to post an RQE to the SLI4 + * firmware. If able to post the RQE to the RQ it will queue the hbq entry to + * the hbq_buffer_list and return zero, otherwise it will return an error. + **/ +static int +lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, + struct hbq_dmabuf *hbq_buf) +{ + int rc; + struct lpfc_rqe hrqe; + struct lpfc_rqe drqe; + + hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); + hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); + drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); + drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); + rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, + &hrqe, &drqe); + if (rc < 0) + return rc; + hbq_buf->tag = rc; + list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); + return 0; } /* HBQ for ELS and CT traffic. 
*/ @@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) dbuf.list); hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | (hbqno << 16)); - if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { + if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { phba->hbqs[hbqno].buffer_count++; posted++; } else @@ -965,6 +1493,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) } /** + * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list + * @phba: Pointer to HBA context object. + * @hbqno: HBQ number. + * + * This function removes the first hbq buffer on an hbq list and returns a + * pointer to that buffer. If it finds no buffers on the list it returns NULL. + **/ +static struct hbq_dmabuf * +lpfc_sli_hbqbuf_get(struct list_head *rb_list) +{ + struct lpfc_dmabuf *d_buf; + + list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); + if (!d_buf) + return NULL; + return container_of(d_buf, struct hbq_dmabuf, dbuf); +} + +/** * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag * @phba: Pointer to HBA context object. * @tag: Tag of the hbq buffer. @@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) if (hbqno >= LPFC_MAX_HBQS) return NULL; + spin_lock_irq(&phba->hbalock); list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); if (hbq_buf->tag == tag) { + spin_unlock_irq(&phba->hbalock); return hbq_buf; } } + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, "1803 Bad hbq tag. Data: x%x x%x\n", tag, phba->hbqs[tag >> 16].buffer_count); @@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) if (hbq_buffer) { hbqno = hbq_buffer->tag >> 16; - if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { + if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); - } } } @@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_HEARTBEAT: case MBX_PORT_CAPABILITIES: case MBX_PORT_IOV_CONTROL: + case MBX_SLI4_CONFIG: + case MBX_SLI4_REQ_FTRS: + case MBX_REG_FCFI: + case MBX_UNREG_FCFI: + case MBX_REG_VFI: + case MBX_UNREG_VFI: + case MBX_INIT_VPI: + case MBX_INIT_VFI: + case MBX_RESUME_RPI: ret = mbxCommand; break; default: @@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) * will wake up thread waiting on the wait queue pointed by context1 * of the mailbox. **/ -static void +void lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { wait_queue_head_t *pdone_q; @@ -1140,7 +1698,7 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_dmabuf *mp; - uint16_t rpi; + uint16_t rpi, vpi; int rc; mp = (struct lpfc_dmabuf *) (pmb->context1); @@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) kfree(mp); } + if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && + (phba->sli_rev == LPFC_SLI_REV4)) + lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); + /* * If a REG_LOGIN succeeded after node is destroyed or node * is in re-discovery driver need to cleanup the RPI. 
*/ if (!(phba->pport->load_flag & FC_UNLOADING) && - pmb->mb.mbxCommand == MBX_REG_LOGIN64 && - !pmb->mb.mbxStatus) { - - rpi = pmb->mb.un.varWords[0]; - lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); + pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && + !pmb->u.mb.mbxStatus) { + rpi = pmb->u.mb.un.varWords[0]; + vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base; + lpfc_unreg_login(phba, vpi, rpi, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_NOT_FINISHED) return; } - mempool_free(pmb, phba->mbox_mem_pool); - return; + if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) + lpfc_sli4_mbox_cmd_free(phba, pmb); + else + mempool_free(pmb, phba->mbox_mem_pool); } /** @@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) if (pmb == NULL) break; - pmbox = &pmb->mb; + pmbox = &pmb->u.mb; if (pmbox->mbxCommand != MBX_HEARTBEAT) { if (pmb->vport) { @@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Unknow mailbox command compl */ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):0323 Unknown Mailbox command " - "%x Cmpl\n", + "x%x (x%x) Cmpl\n", pmb->vport ? pmb->vport->vpi : 0, - pmbox->mbxCommand); + pmbox->mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, pmb)); phba->link_state = LPFC_HBA_ERROR; phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); @@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) LOG_MBOX | LOG_SLI, "(%d):0305 Mbox cmd cmpl " "error - RETRYing Data: x%x " - "x%x x%x x%x\n", + "(x%x) x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi :0, pmbox->mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, + pmb), pmbox->mbxStatus, pmbox->un.varWords[0], pmb->vport->port_state); pmbox->mbxStatus = 0; pmbox->mbxOwner = OWN_HOST; - spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc == MBX_SUCCESS) + if (rc != MBX_NOT_FINISHED) continue; } } /* Mailbox cmd <cmd> Cmpl <cmpl> */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0307 Mailbox cmd x%x Cmpl x%p " + "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p " "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi : 0, pmbox->mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, pmb), pmb->mbox_cmpl, *((uint32_t *) pmbox), pmbox->un.varWords[0], @@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba, return &hbq_entry->dbuf; } +/** + * lpfc_complete_unsol_iocb - Complete an unsolicited sequence + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @saveq: Pointer to the iocbq struct representing the sequence starting frame. + * @fch_r_ctl: the r_ctl for the first frame of the sequence. + * @fch_type: the type for the first frame of the sequence. + * + * This function is called with no lock held. This function uses the r_ctl and + * type of the received sequence to find the correct callback function to call + * to process the sequence. 
+ **/ +static int +lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, + uint32_t fch_type) +{ + int i; + + /* unSolicited Responses */ + if (pring->prt[0].profile) { + if (pring->prt[0].lpfc_sli_rcv_unsol_event) + (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, + saveq); + return 1; + } + /* We must search, based on rctl / type + for the right routine */ + for (i = 0; i < pring->num_mask; i++) { + if ((pring->prt[i].rctl == fch_r_ctl) && + (pring->prt[i].type == fch_type)) { + if (pring->prt[i].lpfc_sli_rcv_unsol_event) + (pring->prt[i].lpfc_sli_rcv_unsol_event) + (phba, pring, saveq); + return 1; + } + } + return 0; +} /** * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler @@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t * irsp; WORD5 * w5p; uint32_t Rctl, Type; - uint32_t match, i; + uint32_t match; struct lpfc_iocbq *iocbq; struct lpfc_dmabuf *dmzbuf; @@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - /* unSolicited Responses */ - if (pring->prt[0].profile) { - if (pring->prt[0].lpfc_sli_rcv_unsol_event) - (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, - saveq); - match = 1; - } else { - /* We must search, based on rctl / type - for the right routine */ - for (i = 0; i < pring->num_mask; i++) { - if ((pring->prt[i].rctl == Rctl) - && (pring->prt[i].type == Type)) { - if (pring->prt[i].lpfc_sli_rcv_unsol_event) - (pring->prt[i].lpfc_sli_rcv_unsol_event) - (phba, pring, saveq); - match = 1; - break; - } - } - } - if (match == 0) { - /* Unexpected Rctl / Type received */ - /* Ring <ringno> handler: unexpected - Rctl <Rctl> Type <Type> received */ + if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0313 Ring %d handler: unexpected Rctl x%x " "Type x%x received\n", pring->ringno, Rctl, Type); - } + return 1; } @@ -1552,6 +2133,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, } /** + * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @iotag: IOCB tag. + * + * This function looks up the iocb_lookup table to get the command iocb + * corresponding to the given iotag. This function is called with the + * hbalock held. + * This function returns the command iocb object if it finds the command + * iocb else returns NULL. + **/ +static struct lpfc_iocbq * +lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint16_t iotag) +{ + struct lpfc_iocbq *cmd_iocb; + + if (iotag != 0 && iotag <= phba->sli.last_iotag) { + cmd_iocb = phba->sli.iocbq_lookup[iotag]; + list_del_init(&cmd_iocb->list); + pring->txcmplq_cnt--; + return cmd_iocb; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0372 iotag x%x is out off range: max iotag (x%x)\n", + iotag, phba->sli.last_iotag); + return NULL; +} + +/** * lpfc_sli_process_sol_iocb - process solicited iocb completion * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. 
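lpfc_complete_unsol_iocb above is table-driven dispatch keyed on the frame's r_ctl/type pair, with slot 0 acting as an optional catch-all profile. A stand-alone sketch of that shape; all names are illustrative:

#include <stdint.h>
#include <stddef.h>

struct ring_mask {
	uint32_t rctl;
	uint32_t type;
	void (*rcv)(void *seq);		/* handler for this (rctl, type) */
};

/* Slot 0 with a nonzero profile acts as a catch-all; otherwise scan the
 * small per-ring table for an exact (rctl, type) match. Returns 1 when a
 * handler ran, 0 when the frame type is unexpected for this ring. */
static int dispatch_unsol(const struct ring_mask *prt, size_t num_mask,
			  int profile0, uint32_t rctl, uint32_t type,
			  void *seq)
{
	size_t i;

	if (profile0) {
		if (prt[0].rcv)
			prt[0].rcv(seq);
		return 1;
	}
	for (i = 0; i < num_mask; i++) {
		if (prt[i].rctl == rctl && prt[i].type == type) {
			if (prt[i].rcv)
				prt[i].rcv(seq);
			return 1;
		}
	}
	return 0;
}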
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { spin_unlock_irqrestore(&phba->hbalock, iflag); - lpfc_rampdown_queue_depth(phba); + phba->lpfc_rampdown_queue_depth(phba); spin_lock_irqsave(&phba->hbalock, iflag); } @@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, } /** - * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings + * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @rspiocbp: Pointer to driver response IOCB object. + * + * This function is called from the worker thread when there is a slow-path + * response IOCB to process. This function chains all the response iocbs until + * seeing the iocb with the LE bit set. The function will call + * lpfc_sli_process_sol_iocb function if the response iocb indicates a + * completion of a command iocb. The function will call the + * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. + * The function frees the resources or calls the completion handler if this + * iocb is an abort completion. The function returns NULL when the response + * iocb has the LE bit set and all the chained iocbs are processed, otherwise + * this function shall chain the iocb on to the iocb_continueq and return the + * response iocb passed in. + **/ +static struct lpfc_iocbq * +lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *rspiocbp) +{ + struct lpfc_iocbq *saveq; + struct lpfc_iocbq *cmdiocbp; + struct lpfc_iocbq *next_iocb; + IOCB_t *irsp = NULL; + uint32_t free_saveq; + uint8_t iocb_cmd_type; + lpfc_iocb_type type; + unsigned long iflag; + int rc; + + spin_lock_irqsave(&phba->hbalock, iflag); + /* First add the response iocb to the countinueq list */ + list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); + pring->iocb_continueq_cnt++; + + /* Now, determine whetehr the list is completed for processing */ + irsp = &rspiocbp->iocb; + if (irsp->ulpLe) { + /* + * By default, the driver expects to free all resources + * associated with this iocb completion. + */ + free_saveq = 1; + saveq = list_get_first(&pring->iocb_continueq, + struct lpfc_iocbq, list); + irsp = &(saveq->iocb); + list_del_init(&pring->iocb_continueq); + pring->iocb_continueq_cnt = 0; + + pring->stats.iocb_rsp++; + + /* + * If resource errors reported from HBA, reduce + * queuedepths of the SCSI device. 
+ */ + if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + phba->lpfc_rampdown_queue_depth(phba); + spin_lock_irqsave(&phba->hbalock, iflag); + } + + if (irsp->ulpStatus) { + /* Rsp ring <ringno> error: IOCB */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0328 Rsp Ring %d error: " + "IOCB Data: " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x\n", + pring->ringno, + irsp->un.ulpWord[0], + irsp->un.ulpWord[1], + irsp->un.ulpWord[2], + irsp->un.ulpWord[3], + irsp->un.ulpWord[4], + irsp->un.ulpWord[5], + *(((uint32_t *) irsp) + 6), + *(((uint32_t *) irsp) + 7), + *(((uint32_t *) irsp) + 8), + *(((uint32_t *) irsp) + 9), + *(((uint32_t *) irsp) + 10), + *(((uint32_t *) irsp) + 11), + *(((uint32_t *) irsp) + 12), + *(((uint32_t *) irsp) + 13), + *(((uint32_t *) irsp) + 14), + *(((uint32_t *) irsp) + 15)); + } + + /* + * Fetch the IOCB command type and call the correct completion + * routine. Solicited and Unsolicited IOCBs on the ELS ring + * get freed back to the lpfc_iocb_list by the discovery + * kernel thread. + */ + iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; + type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); + switch (type) { + case LPFC_SOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + break; + + case LPFC_UNSOL_IOCB: + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + if (!rc) + free_saveq = 0; + break; + + case LPFC_ABORT_IOCB: + cmdiocbp = NULL; + if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) + cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, + saveq); + if (cmdiocbp) { + /* Call the specified completion routine */ + if (cmdiocbp->iocb_cmpl) { + spin_unlock_irqrestore(&phba->hbalock, + iflag); + (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, + saveq); + spin_lock_irqsave(&phba->hbalock, + iflag); + } else + __lpfc_sli_release_iocbq(phba, + cmdiocbp); + } + break; + + case LPFC_UNKNOWN_IOCB: + if (irsp->ulpCommand == CMD_ADAPTER_MSG) { + char adaptermsg[LPFC_MAX_ADPTMSG]; + memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); + memcpy(&adaptermsg[0], (uint8_t *)irsp, + MAX_MSG_DATA); + dev_warn(&((phba->pcidev)->dev), + "lpfc%d: %s\n", + phba->brd_no, adaptermsg); + } else { + /* Unknown IOCB command */ + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0335 Unknown IOCB " + "command Data: x%x " + "x%x x%x x%x\n", + irsp->ulpCommand, + irsp->ulpStatus, + irsp->ulpIoTag, + irsp->ulpContext); + } + break; + } + + if (free_saveq) { + list_for_each_entry_safe(rspiocbp, next_iocb, + &saveq->list, list) { + list_del(&rspiocbp->list); + __lpfc_sli_release_iocbq(phba, rspiocbp); + } + __lpfc_sli_release_iocbq(phba, saveq); + } + rspiocbp = NULL; + } + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rspiocbp; +} + +/** + * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @mask: Host attention register mask for this ring. * - * This function is called from the worker thread when there is a ring - * event for non-fcp rings. The caller does not hold any lock . - * The function processes each response iocb in the response ring until it - * finds an iocb with LE bit set and chains all the iocbs upto the iocb with - * LE bit set. 
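A recurring detail in the completion switch above is dropping the hba lock around every completion callback and retaking it afterwards, so the callback may re-enter driver APIs that take the same lock. A tiny sketch of that discipline, using a pthread mutex purely for illustration:

#include <pthread.h>

struct completer {
	pthread_mutex_t lock;
	void (*cmpl)(struct completer *c);	/* user completion hook */
};

/* Bookkeeping stays under the lock, but the completion callback runs
 * unlocked; the lock is reacquired before touching shared state again. */
static void run_completion(struct completer *c)
{
	pthread_mutex_lock(&c->lock);
	/* ... dequeue / account under the lock ... */
	if (c->cmpl) {
		pthread_mutex_unlock(&c->lock);
		c->cmpl(c);
		pthread_mutex_lock(&c->lock);
	}
	/* ... release resources under the lock ... */
	pthread_mutex_unlock(&c->lock);
}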
The function will call lpfc_sli_process_sol_iocb function if the - * response iocb indicates a completion of a command iocb. The function - * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited - * iocb. The function frees the resources or calls the completion handler if - * this iocb is an abort completion. The function returns 0 when the allocated - * iocbs are not freed, otherwise returns 1. + * This routine wraps the actual slow_ring event process routine from the + * API jump table function pointer from the lpfc_hba struct. **/ -int +void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) { + phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); +} + +/** + * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mask: Host attention register mask for this ring. + * + * This function is called from the worker thread when there is a ring event + * for non-fcp rings. The caller does not hold any lock. The function will + * remove each response iocb in the response ring and calls the handle + * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. + **/ +static void +lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) +{ struct lpfc_pgp *pgp; IOCB_t *entry; IOCB_t *irsp = NULL; struct lpfc_iocbq *rspiocbp = NULL; - struct lpfc_iocbq *next_iocb; - struct lpfc_iocbq *cmdiocbp; - struct lpfc_iocbq *saveq; - uint8_t iocb_cmd_type; - lpfc_iocb_type type; - uint32_t status, free_saveq; uint32_t portRspPut, portRspMax; - int rc = 1; unsigned long iflag; + uint32_t status; pgp = &phba->port_gp[pring->ringno]; spin_lock_irqsave(&phba->hbalock, iflag); @@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); - return 1; + return; } rmb(); @@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); - list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); - - pring->iocb_continueq_cnt++; - if (irsp->ulpLe) { - /* - * By default, the driver expects to free all resources - * associated with this iocb completion. - */ - free_saveq = 1; - saveq = list_get_first(&pring->iocb_continueq, - struct lpfc_iocbq, list); - irsp = &(saveq->iocb); - list_del_init(&pring->iocb_continueq); - pring->iocb_continueq_cnt = 0; - - pring->stats.iocb_rsp++; - - /* - * If resource errors reported from HBA, reduce - * queuedepths of the SCSI device. 
- */ - if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - lpfc_rampdown_queue_depth(phba); - spin_lock_irqsave(&phba->hbalock, iflag); - } - - if (irsp->ulpStatus) { - /* Rsp ring <ringno> error: IOCB */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0328 Rsp Ring %d error: " - "IOCB Data: " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x\n", - pring->ringno, - irsp->un.ulpWord[0], - irsp->un.ulpWord[1], - irsp->un.ulpWord[2], - irsp->un.ulpWord[3], - irsp->un.ulpWord[4], - irsp->un.ulpWord[5], - *(((uint32_t *) irsp) + 6), - *(((uint32_t *) irsp) + 7), - *(((uint32_t *) irsp) + 8), - *(((uint32_t *) irsp) + 9), - *(((uint32_t *) irsp) + 10), - *(((uint32_t *) irsp) + 11), - *(((uint32_t *) irsp) + 12), - *(((uint32_t *) irsp) + 13), - *(((uint32_t *) irsp) + 14), - *(((uint32_t *) irsp) + 15)); - } - - /* - * Fetch the IOCB command type and call the correct - * completion routine. Solicited and Unsolicited - * IOCBs on the ELS ring get freed back to the - * lpfc_iocb_list by the discovery kernel thread. - */ - iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; - type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); - if (type == LPFC_SOL_IOCB) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - rc = lpfc_sli_process_sol_iocb(phba, pring, - saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - } else if (type == LPFC_UNSOL_IOCB) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - rc = lpfc_sli_process_unsol_iocb(phba, pring, - saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - if (!rc) - free_saveq = 0; - } else if (type == LPFC_ABORT_IOCB) { - if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && - ((cmdiocbp = - lpfc_sli_iocbq_lookup(phba, pring, - saveq)))) { - /* Call the specified completion - routine */ - if (cmdiocbp->iocb_cmpl) { - spin_unlock_irqrestore( - &phba->hbalock, - iflag); - (cmdiocbp->iocb_cmpl) (phba, - cmdiocbp, saveq); - spin_lock_irqsave( - &phba->hbalock, - iflag); - } else - __lpfc_sli_release_iocbq(phba, - cmdiocbp); - } - } else if (type == LPFC_UNKNOWN_IOCB) { - if (irsp->ulpCommand == CMD_ADAPTER_MSG) { - - char adaptermsg[LPFC_MAX_ADPTMSG]; - - memset(adaptermsg, 0, - LPFC_MAX_ADPTMSG); - memcpy(&adaptermsg[0], (uint8_t *) irsp, - MAX_MSG_DATA); - dev_warn(&((phba->pcidev)->dev), - "lpfc%d: %s\n", - phba->brd_no, adaptermsg); - } else { - /* Unknown IOCB command */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0335 Unknown IOCB " - "command Data: x%x " - "x%x x%x x%x\n", - irsp->ulpCommand, - irsp->ulpStatus, - irsp->ulpIoTag, - irsp->ulpContext); - } - } - - if (free_saveq) { - list_for_each_entry_safe(rspiocbp, next_iocb, - &saveq->list, list) { - list_del(&rspiocbp->list); - __lpfc_sli_release_iocbq(phba, - rspiocbp); - } - __lpfc_sli_release_iocbq(phba, saveq); - } - rspiocbp = NULL; - } + spin_unlock_irqrestore(&phba->hbalock, iflag); + /* Handle the response IOCB */ + rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); + spin_lock_irqsave(&phba->hbalock, iflag); /* * If the port response put pointer has not been updated, sync @@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, } spin_unlock_irqrestore(&phba->hbalock, iflag); - return rc; + return; +} + +/** + * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @mask: Host attention register mask for this ring. 
+ * + * This function is called from the worker thread when there is a pending + * ELS response iocb on the driver internal slow-path response iocb worker + * queue. The caller does not hold any lock. The function will remove each + * response iocb from the response worker queue and calls the handle + * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. + **/ +static void +lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, + struct lpfc_sli_ring *pring, uint32_t mask) +{ + struct lpfc_iocbq *irspiocbq; + unsigned long iflag; + + while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { + /* Get the response iocb from the head of work queue */ + spin_lock_irqsave(&phba->hbalock, iflag); + list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, + irspiocbq, struct lpfc_iocbq, list); + spin_unlock_irqrestore(&phba->hbalock, iflag); + /* Process the response iocb */ + lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); + } } /** @@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) } /** - * lpfc_sli_brdready - Check for host status bits + * lpfc_sli_brdready_s3 - Check for sli3 host ready status * @phba: Pointer to HBA context object. * @mask: Bit mask to be checked. * @@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) * function returns 1 when HBA fail to restart otherwise returns * zero. **/ -int -lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) +static int +lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) { uint32_t status; int i = 0; @@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) return retval; } +/** + * lpfc_sli_brdready_s4 - Check for sli4 host ready status + * @phba: Pointer to HBA context object. + * @mask: Bit mask to be checked. + * + * This function checks the host status register to check if HBA is + * ready. This function will wait in a loop for the HBA to be ready + * If the HBA is not ready , the function will will reset the HBA PCI + * function again. The function returns 1 when HBA fail to be ready + * otherwise returns zero. + **/ +static int +lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) +{ + uint32_t status; + int retval = 0; + + /* Read the HBA Host Status Register */ + status = lpfc_sli4_post_status_check(phba); + + if (status) { + phba->pport->port_state = LPFC_VPORT_UNKNOWN; + lpfc_sli_brdrestart(phba); + status = lpfc_sli4_post_status_check(phba); + } + + /* Check to see if any errors occurred during init */ + if (status) { + phba->link_state = LPFC_HBA_ERROR; + retval = 1; + } else + phba->sli4_hba.intr_enable = 0; + + return retval; +} + +/** + * lpfc_sli_brdready - Wrapper func for checking the hba readyness + * @phba: Pointer to HBA context object. + * @mask: Bit mask to be checked. + * + * This routine wraps the actual SLI3 or SLI4 hba readyness check routine + * from the API jump table function pointer from the lpfc_hba struct. 
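The SLI4 readiness check described here follows a check/reset/re-check shape. Reduced to its skeleton, with callbacks standing in for the real status and reset routines:

/* Check readiness once; on failure reset the board and check again
 * before declaring an error. Returns 0 = ready, 1 = failed. */
static int board_ready(int (*status_check)(void *hw),
		       void (*board_reset)(void *hw), void *hw)
{
	int status = status_check(hw);

	if (status) {
		board_reset(hw);
		status = status_check(hw);
	}
	return status ? 1 : 0;
}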
+ **/ +int +lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) +{ + return phba->lpfc_sli_brdready(phba, mask); +} + #define BARRIER_TEST_PATTERN (0xdeadbeef) /** @@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) mdelay(1); if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { - if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || + if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || phba->pport->stopped) goto restore_hc; else @@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) return 1; } - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); @@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) } spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + psli->mbox_active = NULL; phba->link_flag &= ~LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); - psli->mbox_active = NULL; lpfc_hba_down_post(phba); phba->link_state = LPFC_HBA_ERROR; @@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) } /** - * lpfc_sli_brdreset - Reset the HBA + * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA * @phba: Pointer to HBA context object. * * This function resets the HBA by writing HC_INITFF to the control @@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); + psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); + /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); @@ -2710,7 +3453,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) } /** - * lpfc_sli_brdrestart - Restart the HBA + * lpfc_sli4_brdreset - Reset a sli-4 HBA + * @phba: Pointer to HBA context object. + * + * This function resets a SLI4 HBA. This function disables PCI layer parity + * checking during resets the device. The caller is not required to hold + * any locks. + * + * This function returns 0 always. 
+ **/ +int +lpfc_sli4_brdreset(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + uint16_t cfg_value; + uint8_t qindx; + + /* Reset HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0295 Reset HBA Data: x%x x%x\n", + phba->pport->port_state, psli->sli_flag); + + /* perform board reset */ + phba->fc_eventTag = 0; + phba->pport->fc_myDID = 0; + phba->pport->fc_prevDID = 0; + + /* Turn off parity checking and serr during the physical reset */ + pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); + pci_write_config_word(phba->pcidev, PCI_COMMAND, + (cfg_value & + ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); + + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~(LPFC_PROCESS_LA); + phba->fcf.fcf_flag = 0; + /* Clean up the child queue list for the CQs */ + list_del_init(&phba->sli4_hba.mbx_wq->list); + list_del_init(&phba->sli4_hba.els_wq->list); + list_del_init(&phba->sli4_hba.hdr_rq->list); + list_del_init(&phba->sli4_hba.dat_rq->list); + list_del_init(&phba->sli4_hba.mbx_cq->list); + list_del_init(&phba->sli4_hba.els_cq->list); + list_del_init(&phba->sli4_hba.rxq_cq->list); + for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) + list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); + for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) + list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); + spin_unlock_irq(&phba->hbalock); + + /* Now physically reset the device */ + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0389 Performing PCI function reset!\n"); + /* Perform FCoE PCI function reset */ + lpfc_pci_function_reset(phba); + + return 0; +} + +/** + * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba * @phba: Pointer to HBA context object. * * This function is called in the SLI initialization code path to @@ -2722,8 +3524,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) * The function does not guarantee completion of MBX_RESTART mailbox * command before the return of this function. **/ -int -lpfc_sli_brdrestart(struct lpfc_hba *phba) +static int +lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) { MAILBOX_t *mb; struct lpfc_sli *psli; @@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) lpfc_sli_brdreset(phba); phba->pport->stopped = 0; phba->link_state = LPFC_INIT_START; - + phba->hba_flag = 0; spin_unlock_irq(&phba->hbalock); memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); @@ -2777,6 +3579,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) } /** + * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba + * @phba: Pointer to HBA context object. + * + * This function is called in the SLI initialization code path to restart + * a SLI4 HBA. The caller is not required to hold any lock. + * At the end of the function, it calls lpfc_hba_down_post function to + * free any pending commands. + **/ +static int +lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + + + /* Restart HBA */ + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0296 Restart HBA Data: x%x x%x\n", + phba->pport->port_state, psli->sli_flag); + + lpfc_sli4_brdreset(phba); + + spin_lock_irq(&phba->hbalock); + phba->pport->stopped = 0; + phba->link_state = LPFC_INIT_START; + phba->hba_flag = 0; + spin_unlock_irq(&phba->hbalock); + + memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); + psli->stats_start = get_seconds(); + + lpfc_hba_down_post(phba); + + return 0; +} + +/** + * lpfc_sli_brdrestart - Wrapper func for restarting hba + * @phba: Pointer to HBA context object. 
+ * + * This routine wraps the actual SLI3 or SLI4 hba restart routine from the + * API jump table function pointer from the lpfc_hba struct. +**/ +int +lpfc_sli_brdrestart(struct lpfc_hba *phba) +{ + return phba->lpfc_sli_brdrestart(phba); +} + +/** * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart * @phba: Pointer to HBA context object. * @@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) if (!pmb) return -ENOMEM; - pmbox = &pmb->mb; + pmbox = &pmb->u.mb; /* Initialize the struct lpfc_sli_hbq structure for each hbq */ phba->link_state = LPFC_INIT_MBX_CMDS; @@ -2984,6 +3835,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) } /** + * lpfc_sli4_rb_setup - Initialize and post RBs to HBA + * @phba: Pointer to HBA context object. + * + * This function is called during the SLI initialization to configure + * all the HBQs and post buffers to the HBQ. The caller is not + * required to hold any locks. This function will return zero if successful + * else it will return negative error code. + **/ +static int +lpfc_sli4_rb_setup(struct lpfc_hba *phba) +{ + phba->hbq_in_use = 1; + phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; + phba->hbq_count = 1; + /* Initially populate or replenish the HBQs */ + lpfc_sli_hbqbuf_init_hbqs(phba, 0); + return 0; +} + +/** * lpfc_sli_config_port - Issue config port mailbox command * @phba: Pointer to HBA context object. * @sli_mode: sli mode - 2/3 @@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0442 Adapter failed to init, mbxCmd x%x " "CONFIG_PORT, mbxStatus x%x Data: x%x\n", - pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); + pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; + phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); rc = -ENXIO; - } else + } else { + /* Allow asynchronous mailbox command to go through */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); done = 1; + } } if (!done) { rc = -EINVAL; goto do_prep_failed; } - if (pmb->mb.un.varCfgPort.sli_mode == 3) { - if (!pmb->mb.un.varCfgPort.cMA) { + if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { + if (!pmb->u.mb.un.varCfgPort.cMA) { rc = -ENXIO; goto do_prep_failed; } - if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { + if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; - phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; + phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; + phba->max_vports = (phba->max_vpi > phba->max_vports) ? 
+ phba->max_vpi : phba->max_vports; + } else phba->max_vpi = 0; - if (pmb->mb.un.varCfgPort.gerbm) + if (pmb->u.mb.un.varCfgPort.gdss) + phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; + if (pmb->u.mb.un.varCfgPort.gerbm) phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; - if (pmb->mb.un.varCfgPort.gcrp) + if (pmb->u.mb.un.varCfgPort.gcrp) phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; - if (pmb->mb.un.varCfgPort.ginb) { + if (pmb->u.mb.un.varCfgPort.ginb) { phba->sli3_options |= LPFC_SLI3_INB_ENABLED; phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; phba->port_gp = phba->mbox->us.s3_inb_pgp.port; @@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) } if (phba->cfg_enable_bg) { - if (pmb->mb.un.varCfgPort.gbg) + if (pmb->u.mb.un.varCfgPort.gbg) phba->sli3_options |= LPFC_SLI3_BG_ENABLED; else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) if (rc) goto lpfc_sli_hba_setup_error; } - + spin_lock_irq(&phba->hbalock); phba->sli.sli_flag |= LPFC_PROCESS_LA; + spin_unlock_irq(&phba->hbalock); rc = lpfc_config_port_post(phba); if (rc) @@ -3200,6 +4082,488 @@ lpfc_sli_hba_setup_error: return rc; } +/** + * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region + * @phba: Pointer to HBA context object. + * @mboxq: mailbox pointer. + * This function issue a dump mailbox command to read config region + * 23 and parse the records in the region and populate driver + * data structure. + **/ +static int +lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, + LPFC_MBOXQ_t *mboxq) +{ + struct lpfc_dmabuf *mp; + struct lpfc_mqe *mqe; + uint32_t data_length; + int rc; + + /* Program the default value of vlan_id and fc_map */ + phba->valid_vlan = 0; + phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; + phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; + phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; + + mqe = &mboxq->u.mqe; + if (lpfc_dump_fcoe_param(phba, mboxq)) + return -ENOMEM; + + mp = (struct lpfc_dmabuf *) mboxq->context1; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):2571 Mailbox cmd x%x Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "CQ: x%x x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_command, mqe), + bf_get(lpfc_mqe_status, mqe), + mqe->un.mb_words[0], mqe->un.mb_words[1], + mqe->un.mb_words[2], mqe->un.mb_words[3], + mqe->un.mb_words[4], mqe->un.mb_words[5], + mqe->un.mb_words[6], mqe->un.mb_words[7], + mqe->un.mb_words[8], mqe->un.mb_words[9], + mqe->un.mb_words[10], mqe->un.mb_words[11], + mqe->un.mb_words[12], mqe->un.mb_words[13], + mqe->un.mb_words[14], mqe->un.mb_words[15], + mqe->un.mb_words[16], mqe->un.mb_words[50], + mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); + + if (rc) { + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + return -EIO; + } + data_length = mqe->un.mb_words[5]; + if (data_length > DMP_FCOEPARAM_RGN_SIZE) + return -EIO; + + lpfc_parse_fcoe_conf(phba, mp->virt, data_length); + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + return 0; +} + +/** + * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to the LPFC_MBOXQ_t structure. + * @vpd: pointer to the memory to hold resulting port vpd data. + * @vpd_size: On input, the number of bytes allocated to @vpd. + * On output, the number of data bytes in @vpd. 
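The vpd_size parameter documented here is an in/out contract: buffer capacity on entry, bytes actually returned on exit, clamped to what the port reports as available. A stand-alone sketch of that clamping; the names are illustrative:

#include <stdint.h>
#include <string.h>

/* The caller passes its buffer capacity in *len; on return *len holds the
 * number of bytes actually copied, clamped to what the device reported. */
static int copy_reported_data(uint8_t *dst, uint32_t *len,
			      const uint8_t *dma_buf, uint32_t avail_len)
{
	if (avail_len < *len)
		*len = avail_len;
	memcpy(dst, dma_buf, *len);
	return 0;
}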
+ * + * This routine executes a READ_REV SLI4 mailbox command. In + * addition, this routine gets the port vpd data. + * + * Return codes + * 0 - sucessful + * ENOMEM - could not allocated memory. + **/ +static int +lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint8_t *vpd, uint32_t *vpd_size) +{ + int rc = 0; + uint32_t dma_size; + struct lpfc_dmabuf *dmabuf; + struct lpfc_mqe *mqe; + + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + return -ENOMEM; + + /* + * Get a DMA buffer for the vpd data resulting from the READ_REV + * mailbox command. + */ + dma_size = *vpd_size; + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + dma_size, + &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + return -ENOMEM; + } + memset(dmabuf->virt, 0, dma_size); + + /* + * The SLI4 implementation of READ_REV conflicts at word1, + * bits 31:16 and SLI4 adds vpd functionality not present + * in SLI3. This code corrects the conflicts. + */ + lpfc_read_rev(phba, mboxq); + mqe = &mboxq->u.mqe; + mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); + mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); + mqe->un.read_rev.word1 &= 0x0000FFFF; + bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); + bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); + + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (rc) { + dma_free_coherent(&phba->pcidev->dev, dma_size, + dmabuf->virt, dmabuf->phys); + return -EIO; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0380 Mailbox cmd x%x Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "CQ: x%x x%x x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + bf_get(lpfc_mqe_command, mqe), + bf_get(lpfc_mqe_status, mqe), + mqe->un.mb_words[0], mqe->un.mb_words[1], + mqe->un.mb_words[2], mqe->un.mb_words[3], + mqe->un.mb_words[4], mqe->un.mb_words[5], + mqe->un.mb_words[6], mqe->un.mb_words[7], + mqe->un.mb_words[8], mqe->un.mb_words[9], + mqe->un.mb_words[10], mqe->un.mb_words[11], + mqe->un.mb_words[12], mqe->un.mb_words[13], + mqe->un.mb_words[14], mqe->un.mb_words[15], + mqe->un.mb_words[16], mqe->un.mb_words[50], + mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); + + /* + * The available vpd length cannot be bigger than the + * DMA buffer passed to the port. Catch the less than + * case and update the caller's size. + */ + if (mqe->un.read_rev.avail_vpd_len < *vpd_size) + *vpd_size = mqe->un.read_rev.avail_vpd_len; + + lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size); + dma_free_coherent(&phba->pcidev->dev, dma_size, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + return 0; +} + +/** + * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues + * @phba: pointer to lpfc hba data structure. 
+ * + * This routine is called to explicitly arm the SLI4 device's completion and + * event queues + **/ +static void +lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) +{ + uint8_t fcp_eqidx; + + lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); + lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); + lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM); + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) + lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], + LPFC_QUEUE_REARM); + lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) + lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], + LPFC_QUEUE_REARM); +} + +/** + * lpfc_sli4_hba_setup - SLI4 device initialization PCI function + * @phba: Pointer to HBA context object. + * + * This function is the main SLI4 device initialization PCI function. This + * function is called by the HBA initialization code, HBA reset code and + * HBA error attention handler code. Caller is not required to hold any + * locks. + **/ +int +lpfc_sli4_hba_setup(struct lpfc_hba *phba) +{ + int rc; + LPFC_MBOXQ_t *mboxq; + struct lpfc_mqe *mqe; + uint8_t *vpd; + uint32_t vpd_size; + uint32_t ftr_rsp = 0; + struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); + struct lpfc_vport *vport = phba->pport; + struct lpfc_dmabuf *mp; + + /* Perform a PCI function reset to start from clean */ + rc = lpfc_pci_function_reset(phba); + if (unlikely(rc)) + return -ENODEV; + + /* Check the HBA Host Status Register for readiness */ + rc = lpfc_sli4_post_status_check(phba); + if (unlikely(rc)) + return -ENODEV; + else { + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_SLI_ACTIVE; + spin_unlock_irq(&phba->hbalock); + } + + /* + * Allocate a single mailbox container for initializing the + * port. + */ + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + /* + * Continue initialization with default values even if the driver failed + * to read FCoE param config regions + */ + if (lpfc_sli4_read_fcoe_params(phba, mboxq)) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, + "2570 Failed to read FCoE parameters\n"); + + /* Issue READ_REV to collect vpd and FW information. */ + vpd_size = PAGE_SIZE; + vpd = kzalloc(vpd_size, GFP_KERNEL); + if (!vpd) { + rc = -ENOMEM; + goto out_free_mbox; + } + + rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); + if (unlikely(rc)) + goto out_free_vpd; + + mqe = &mboxq->u.mqe; + if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, + &mqe->un.read_rev) != LPFC_SLI_REV4) || + (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0376 READ_REV Error. SLI Level %d " + "FCoE enabled %d\n", + bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), + bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)); + rc = -EIO; + goto out_free_vpd; + } + /* Single threaded at this point, no need for lock */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= HBA_FCOE_SUPPORT; + spin_unlock_irq(&phba->hbalock); + /* + * Evaluate the read rev and vpd data. Populate the driver + * state with the results. If this routine fails, the failure + * is not fatal as the driver will use generic values. + */ + rc = lpfc_parse_vpd(phba, vpd, vpd_size); + if (unlikely(!rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0377 Error %d parsing vpd.
" + "Using defaults.\n", rc); + rc = 0; + } + + /* By now, we should determine the SLI revision, hard code for now */ + phba->sli_rev = LPFC_SLI_REV4; + + /* + * Discover the port's supported feature set and match it against the + * hosts requests. + */ + lpfc_request_features(phba, mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (unlikely(rc)) { + rc = -EIO; + goto out_free_vpd; + } + + /* + * The port must support FCP initiator mode as this is the + * only mode running in the host. + */ + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0378 No support for fcpi mode.\n"); + ftr_rsp++; + } + + /* + * If the port cannot support the host's requested features + * then turn off the global config parameters to disable the + * feature in the driver. This is not a fatal error. + */ + if ((phba->cfg_enable_bg) && + !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) + ftr_rsp++; + + if (phba->max_vpi && phba->cfg_enable_npiv && + !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) + ftr_rsp++; + + if (ftr_rsp) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0379 Feature Mismatch Data: x%08x %08x " + "x%x x%x x%x\n", mqe->un.req_ftrs.word2, + mqe->un.req_ftrs.word3, phba->cfg_enable_bg, + phba->cfg_enable_npiv, phba->max_vpi); + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) + phba->cfg_enable_bg = 0; + if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) + phba->cfg_enable_npiv = 0; + } + + /* These SLI3 features are assumed in SLI4 */ + spin_lock_irq(&phba->hbalock); + phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); + spin_unlock_irq(&phba->hbalock); + + /* Read the port's service parameters. */ + lpfc_read_sparam(phba, mboxq, vport->vpi); + mboxq->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + mp = (struct lpfc_dmabuf *) mboxq->context1; + if (rc == MBX_SUCCESS) { + memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); + rc = 0; + } + + /* + * This memory was allocated by the lpfc_read_sparam routine. Release + * it to the mbuf pool. + */ + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mboxq->context1 = NULL; + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0382 READ_SPARAM command failed " + "status %d, mbxStatus x%x\n", + rc, bf_get(lpfc_mqe_status, mqe)); + phba->link_state = LPFC_HBA_ERROR; + rc = -EIO; + goto out_free_vpd; + } + + if (phba->cfg_soft_wwnn) + u64_to_wwn(phba->cfg_soft_wwnn, + vport->fc_sparam.nodeName.u.wwn); + if (phba->cfg_soft_wwpn) + u64_to_wwn(phba->cfg_soft_wwpn, + vport->fc_sparam.portName.u.wwn); + memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, + sizeof(struct lpfc_name)); + memcpy(&vport->fc_portname, &vport->fc_sparam.portName, + sizeof(struct lpfc_name)); + + /* Update the fc_host data structures with new wwn. 
*/ + fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); + fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); + + /* Register SGL pool to the device using non-embedded mailbox command */ + rc = lpfc_sli4_post_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0582 Error %d during sgl post operation", rc); + rc = -ENODEV; + goto out_free_vpd; + } + + /* Register SCSI SGL pool to the device */ + rc = lpfc_sli4_repost_scsi_sgl_list(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "0383 Error %d during scsi sgl post opeation", + rc); + /* Some Scsi buffers were moved to the abort scsi list */ + /* A pci function reset will repost them */ + rc = -ENODEV; + goto out_free_vpd; + } + + /* Post the rpi header region to the device. */ + rc = lpfc_sli4_post_all_rpi_hdrs(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0393 Error %d during rpi post operation\n", + rc); + rc = -ENODEV; + goto out_free_vpd; + } + /* Temporary initialization of lpfc_fip_flag to non-fip */ + bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); + + /* Set up all the queues to the device */ + rc = lpfc_sli4_queue_setup(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0381 Error %d during queue setup.\n ", rc); + goto out_stop_timers; + } + + /* Arm the CQs and then EQs on device */ + lpfc_sli4_arm_cqeq_intr(phba); + + /* Indicate device interrupt mode */ + phba->sli4_hba.intr_enable = 1; + + /* Allow asynchronous mailbox command to go through */ + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); + + /* Post receive buffers to the device */ + lpfc_sli4_rb_setup(phba); + + /* Start the ELS watchdog timer */ + /* + * The driver for SLI4 is not yet ready to process timeouts + * or interrupts. Once it is, the comment bars can be removed. + */ + /* mod_timer(&vport->els_tmofunc, + * jiffies + HZ * (phba->fc_ratov*2)); */ + + /* Start heart beat timer */ + mod_timer(&phba->hb_tmofunc, + jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + phba->hb_outstanding = 0; + phba->last_completion_time = jiffies; + + /* Start error attention (ERATT) polling timer */ + mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + + /* + * The port is ready, set the host's link state to LINK_DOWN + * in preparation for link interrupts. 
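Each bring-up step above (SGL post, SCSI SGL repost, RPI headers, queue setup, timers) bails to a label that unwinds only what has already succeeded. That ordering is the whole point of the goto ladder; in skeleton form, with hypothetical step helpers standing in for the lpfc routines:

extern int setup_queues(void);
extern int start_timers(void);
extern void teardown_queues(void);

static int bring_up(void)
{
	int rc;

	rc = setup_queues();
	if (rc)
		goto out;		/* nothing to undo yet */
	rc = start_timers();
	if (rc)
		goto unset_queues;
	return 0;

unset_queues:
	teardown_queues();		/* undo in reverse order */
out:
	return rc;
}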
+ */ + lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed); + mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + lpfc_set_loopback_flag(phba); + /* Change driver state to LPFC_LINK_DOWN right before init link */ + spin_lock_irq(&phba->hbalock); + phba->link_state = LPFC_LINK_DOWN; + spin_unlock_irq(&phba->hbalock); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (unlikely(rc != MBX_NOT_FINISHED)) { + kfree(vpd); + return 0; + } else + rc = -EIO; + + /* Unset all the queues set up in this routine when error out */ + if (rc) + lpfc_sli4_queue_unset(phba); + +out_stop_timers: + if (rc) + lpfc_stop_hba_timers(phba); +out_free_vpd: + kfree(vpd); +out_free_mbox: + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; +} /** * lpfc_mbox_timeout - Timeout call back function for mbox timer @@ -3244,7 +4608,7 @@ void lpfc_mbox_timeout_handler(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; - MAILBOX_t *mb = &pmbox->mb; + MAILBOX_t *mb = &pmbox->u.mb; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; @@ -3281,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) spin_unlock_irq(&phba->pport->work_port_lock); spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_UNKNOWN; - psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + psli->sli_flag &= ~LPFC_SLI_ACTIVE; spin_unlock_irq(&phba->hbalock); pring = &psli->ring[psli->fcp_ring]; @@ -3289,32 +4653,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0345 Resetting board due to mailbox timeout\n"); - /* - * lpfc_offline calls lpfc_sli_hba_down which will clean up - * on oustanding mailbox commands. - */ - /* If resets are disabled then set error state and return. */ - if (!phba->cfg_enable_hba_reset) { - phba->link_state = LPFC_HBA_ERROR; - return; - } - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_sli_brdrestart(phba); - lpfc_online(phba); - lpfc_unblock_mgmt_io(phba); - return; + + /* Reset the HBA device */ + lpfc_reset_hba(phba); } /** - * lpfc_sli_issue_mbox - Issue a mailbox command to firmware + * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware * @phba: Pointer to HBA context object. * @pmbox: Pointer to mailbox object. * @flag: Flag indicating how the mailbox need to be processed. * * This function is called by discovery code and HBA management code - * to submit a mailbox command to firmware. This function gets the - * hbalock to protect the data structures. + * to submit a mailbox command to firmware with SLI-3 interface spec. This + * function gets the hbalock to protect the data structures. * The mailbox command can be submitted in polling mode, in which case * this function will wait in a polling loop for the completion of the * mailbox. @@ -3332,8 +4684,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) * return codes the caller owns the mailbox command after the return of * the function. 
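The ownership rule stated above is easy to get wrong, so, in broad strokes: with MBX_POLL the command is finished (or failed) by the time the call returns and the caller may reuse or free the mailbox; with MBX_NOWAIT, MBX_BUSY and MBX_SUCCESS mean the sub-system holds the command until its mbox_cmpl runs, while MBX_NOT_FINISHED means it never got in and cleanup falls back to the caller. A hedged caller-side sketch of the MBX_NOWAIT case:

rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
	/* Never entered the sub-system: the caller still owns it. */
	mempool_free(mboxq, phba->mbox_mem_pool);
	return -EIO;
}
/* MBX_BUSY or MBX_SUCCESS: the mbox_cmpl callback releases it later. */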
**/ -int -lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) +static int +lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, + uint32_t flag) { MAILBOX_t *mb; struct lpfc_sli *psli = &phba->sli; @@ -3349,6 +4702,10 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) spin_lock_irqsave(&phba->hbalock, drvr_flag); if (!pmbox) { /* processing mbox queue from intr_handler */ + if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { + spin_unlock_irqrestore(&phba->hbalock, drvr_flag); + return MBX_SUCCESS; + } processing_queue = 1; phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; pmbox = lpfc_mbox_get(phba); @@ -3365,7 +4722,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, "1806 Mbox x%x failed. No vport\n", - pmbox->mb.mbxCommand); + pmbox->u.mb.mbxCommand); dump_stack(); goto out_not_finished; } @@ -3385,21 +4742,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) psli = &phba->sli; - mb = &pmbox->mb; + mb = &pmbox->u.mb; status = MBX_SUCCESS; if (phba->link_state == LPFC_HBA_ERROR) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):0311 Mailbox command x%x cannot " + "issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2528 Mailbox command x%x cannot " + "issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, psli->sli_flag, flag); goto out_not_finished; } @@ -3413,14 +4778,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2529 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); goto out_not_finished; } - if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { + if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2530 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? 
pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); goto out_not_finished; } @@ -3462,12 +4837,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) /* If we are not polling, we MUST be in SLI2 mode */ if (flag != MBX_POLL) { - if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && + if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && (mb->mbxCommand != MBX_KILL_BOARD)) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command <mbxCommand> cannot issue */ - LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2531 Mailbox command x%x " + "cannot issue Data: x%x x%x\n", + pmbox->vport ? pmbox->vport->vpi : 0, + pmbox->u.mb.mbxCommand, + psli->sli_flag, flag); goto out_not_finished; } /* timeout active mbox command */ @@ -3506,7 +4886,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) /* next set own bit for the adapter and copy over command word */ mb->mbxOwner = OWN_CHIP; - if (psli->sli_flag & LPFC_SLI2_ACTIVE) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* First copy command data to host SLIM area */ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); } else { @@ -3529,7 +4909,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) if (mb->mbxCommand == MBX_CONFIG_PORT) { /* switch over to host mailbox */ - psli->sli_flag |= LPFC_SLI2_ACTIVE; + psli->sli_flag |= LPFC_SLI_ACTIVE; } } @@ -3552,7 +4932,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) writel(CA_MBATT, phba->CAregaddr); readl(phba->CAregaddr); /* flush */ - if (psli->sli_flag & LPFC_SLI2_ACTIVE) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* First read mbox status word */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); @@ -3591,7 +4971,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) spin_lock_irqsave(&phba->hbalock, drvr_flag); } - if (psli->sli_flag & LPFC_SLI2_ACTIVE) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* First copy command data */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); @@ -3604,7 +4984,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) if (((slimword0 & OWN_CHIP) != OWN_CHIP) && slimmb->mbxStatus) { psli->sli_flag &= - ~LPFC_SLI2_ACTIVE; + ~LPFC_SLI_ACTIVE; word0 = slimword0; } } @@ -3616,7 +4996,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) ha_copy = readl(phba->HAregaddr); } - if (psli->sli_flag & LPFC_SLI2_ACTIVE) { + if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* copy results back to user */ lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); } else { @@ -3643,13 +5023,420 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) out_not_finished: if (processing_queue) { - pmbox->mb.mbxStatus = MBX_NOT_FINISHED; + pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; lpfc_mbox_cmpl_put(phba, pmbox); } return MBX_NOT_FINISHED; } /** + * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to mailbox object. + * + * The function posts a mailbox to the port. The mailbox is expected + * to be comletely filled in and ready for the port to operate on it. + * This routine executes a synchronous completion operation on the + * mailbox by polling for its completion. 
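The bootstrap-mailbox post that follows waits on the BMBX ready bit twice, once per address half, with the same bounded-polling loop each time. Condensed to its shape (db_reg, DB_READY and tmo_ms are placeholders for the lpfc register plumbing):

unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);

for (;;) {
	if (readl(db_reg) & DB_READY)
		break;			/* port has consumed the address */
	if (time_after(jiffies, deadline))
		return MBXERR_ERROR;	/* give up; caller drops the token */
	msleep(2);			/* cheap poll; sleeping is allowed */
}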
+ * + * The caller must not be holding any locks when calling this routine. + * + * Returns: + * MBX_SUCCESS - mailbox posted successfully + * Any of the MBX error values. + **/ +static int +lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + int rc = MBX_SUCCESS; + unsigned long iflag; + uint32_t db_ready; + uint32_t mcqe_status; + uint32_t mbx_cmnd; + unsigned long timeout; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_mqe *mb = &mboxq->u.mqe; + struct lpfc_bmbx_create *mbox_rgn; + struct dma_address *dma_address; + struct lpfc_register bmbx_reg; + + /* + * Only one mailbox can be active to the bootstrap mailbox region + * at a time and there is no queueing provided. + */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2532 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, MBX_POLL); + return MBXERR_ERROR; + } + /* The server grabs the token and owns it until release */ + psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = mboxq; + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* + * Initialize the bootstrap memory region to avoid stale data areas + * in the mailbox post. Then copy the caller's mailbox contents to + * the bmbx mailbox region. + */ + mbx_cmnd = bf_get(lpfc_mqe_command, mb); + memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); + lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, + sizeof(struct lpfc_mqe)); + + /* Post the high mailbox dma address to the port and wait for ready. */ + dma_address = &phba->sli4_hba.bmbx.dma_address; + writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); + + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) + * 1000) + jiffies; + do { + bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); + db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); + if (!db_ready) + msleep(2); + + if (time_after(jiffies, timeout)) { + rc = MBXERR_ERROR; + goto exit; + } + } while (!db_ready); + + /* Post the low mailbox dma address to the port. */ + writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) + * 1000) + jiffies; + do { + bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); + db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); + if (!db_ready) + msleep(2); + + if (time_after(jiffies, timeout)) { + rc = MBXERR_ERROR; + goto exit; + } + } while (!db_ready); + + /* + * Read the CQ to ensure the mailbox has completed. + * If so, update the mailbox status so that the upper layers + * can complete the request normally. + */ + lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, + sizeof(struct lpfc_mqe)); + mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; + lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, + sizeof(struct lpfc_mcqe)); + mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); + + /* Prefix the mailbox status with range x4000 to note SLI4 status. */ + if (mcqe_status != MB_CQE_STATUS_SUCCESS) { + bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); + rc = MBXERR_ERROR; + } + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" + " x%x x%x CQ: x%x x%x x%x x%x\n", + mboxq->vport ? 
mboxq->vport->vpi : 0, + mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), + bf_get(lpfc_mqe_status, mb), + mb->un.mb_words[0], mb->un.mb_words[1], + mb->un.mb_words[2], mb->un.mb_words[3], + mb->un.mb_words[4], mb->un.mb_words[5], + mb->un.mb_words[6], mb->un.mb_words[7], + mb->un.mb_words[8], mb->un.mb_words[9], + mb->un.mb_words[10], mb->un.mb_words[11], + mb->un.mb_words[12], mboxq->mcqe.word0, + mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, + mboxq->mcqe.trailer); +exit: + /* We are holding the token, no needed for lock when release */ + spin_lock_irqsave(&phba->hbalock, iflag); + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return rc; +} + +/** + * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware + * @phba: Pointer to HBA context object. + * @pmbox: Pointer to mailbox object. + * @flag: Flag indicating how the mailbox need to be processed. + * + * This function is called by discovery code and HBA management code to submit + * a mailbox command to firmware with SLI-4 interface spec. + * + * Return codes the caller owns the mailbox command after the return of the + * function. + **/ +static int +lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint32_t flag) +{ + struct lpfc_sli *psli = &phba->sli; + unsigned long iflags; + int rc; + + /* Detect polling mode and jump to a handler */ + if (!phba->sli4_hba.intr_enable) { + if (flag == MBX_POLL) + rc = lpfc_sli4_post_sync_mbox(phba, mboxq); + else + rc = -EIO; + if (rc != MBX_SUCCESS) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2541 Mailbox command x%x " + "(x%x) cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + return rc; + } else if (flag == MBX_POLL) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2542 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + return -EIO; + } + + /* Now, interrupt mode asynchrous mailbox command */ + rc = lpfc_mbox_cmd_check(phba, mboxq); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2543 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + rc = lpfc_mbox_dev_check(phba); + if (unlikely(rc)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2544 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, flag); + goto out_not_finished; + } + + /* Put the mailbox command to the driver internal FIFO */ + psli->slistat.mbox_busy++; + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_mbox_put(phba, mboxq); + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0354 Mbox cmd issue - Enqueue Data: " + "x%x (x%x) x%x x%x x%x\n", + mboxq->vport ? 
mboxq->vport->vpi : 0xffffff, + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli4_mbox_opcode_get(phba, mboxq), + phba->pport->port_state, + psli->sli_flag, MBX_NOWAIT); + /* Wake up worker thread to transport mailbox command from head */ + lpfc_worker_wake_up(phba); + + return MBX_BUSY; + +out_not_finished: + return MBX_NOT_FINISHED; +} + +/** + * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device + * @phba: Pointer to HBA context object. + * + * This function is called by worker thread to send a mailbox command to + * SLI4 HBA firmware. + * + **/ +int +lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *mboxq; + int rc = MBX_SUCCESS; + unsigned long iflags; + struct lpfc_mqe *mqe; + uint32_t mbx_cmnd; + + /* Check interrupt mode before post async mailbox command */ + if (unlikely(!phba->sli4_hba.intr_enable)) + return MBX_NOT_FINISHED; + + /* Check for mailbox command service token */ + spin_lock_irqsave(&phba->hbalock, iflags); + if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_NOT_FINISHED; + } + if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_NOT_FINISHED; + } + if (unlikely(phba->sli.mbox_active)) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "0384 There is pending active mailbox cmd\n"); + return MBX_NOT_FINISHED; + } + /* Take the mailbox command service token */ + psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; + + /* Get the next mailbox command from head of queue */ + mboxq = lpfc_mbox_get(phba); + + /* If no more mailbox command waiting for post, we're done */ + if (!mboxq) { + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irqrestore(&phba->hbalock, iflags); + return MBX_SUCCESS; + } + phba->sli.mbox_active = mboxq; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Check device readiness for posting mailbox command */ + rc = lpfc_mbox_dev_check(phba); + if (unlikely(rc)) + /* Driver clean routine will clean up pending mailbox */ + goto out_not_finished; + + /* Prepare the mbox command to be posted */ + mqe = &mboxq->u.mqe; + mbx_cmnd = bf_get(lpfc_mqe_command, mqe); + + /* Start timer for the mbox_tmo and log some mailbox post messages */ + mod_timer(&psli->mbox_tmo, (jiffies + + (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); + + lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, + "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " + "x%x x%x\n", + mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + phba->pport->port_state, psli->sli_flag); + + if (mbx_cmnd != MBX_HEARTBEAT) { + if (mboxq->vport) { + lpfc_debugfs_disc_trc(mboxq->vport, + LPFC_DISC_TRC_MBOX_VPORT, + "MBOX Send vport: cmd:x%x mb:x%x x%x", + mbx_cmnd, mqe->un.mb_words[0], + mqe->un.mb_words[1]); + } else { + lpfc_debugfs_disc_trc(phba->pport, + LPFC_DISC_TRC_MBOX, + "MBOX Send: cmd:x%x mb:x%x x%x", + mbx_cmnd, mqe->un.mb_words[0], + mqe->un.mb_words[1]); + } + } + psli->slistat.mbox_cmd++; + + /* Post the mailbox command to the port */ + rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, + "(%d):2533 Mailbox command x%x (x%x) " + "cannot issue Data: x%x x%x\n", + mboxq->vport ? 
mboxq->vport->vpi : 0, + mboxq->u.mb.mbxCommand, + lpfc_sli4_mbox_opcode_get(phba, mboxq), + psli->sli_flag, MBX_NOWAIT); + goto out_not_finished; + } + + return rc; + +out_not_finished: + spin_lock_irqsave(&phba->hbalock, iflags); + mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; + __lpfc_mbox_cmpl_put(phba, mboxq); + /* Release the token */ + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + phba->sli.mbox_active = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return MBX_NOT_FINISHED; +} + +/** + * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command + * @phba: Pointer to HBA context object. + * @pmbox: Pointer to mailbox object. + * @flag: Flag indicating how the mailbox need to be processed. + * + * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from + * the API jump table function pointer from the lpfc_hba struct. + * + * Return codes the caller owns the mailbox command after the return of the + * function. + **/ +int +lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) +{ + return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); +} + +/** + * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number. + * + * This routine sets up the mbox interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; + phba->lpfc_sli_handle_slow_ring_event = + lpfc_sli_handle_slow_ring_event_s3; + phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; + phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; + phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; + break; + case LPFC_PCI_DEV_OC: + phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; + phba->lpfc_sli_handle_slow_ring_event = + lpfc_sli_handle_slow_ring_event_s4; + phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; + phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; + phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1420 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + break; + } + return 0; +} + +/** * __lpfc_sli_ringtx_put - Add an iocb to the txq * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. @@ -3701,35 +5488,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } /** - * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb + * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. + * @ring_number: SLI ring number to issue iocb on. * @piocb: Pointer to command iocb. * @flag: Flag indicating if this command can be put into txq. * - * __lpfc_sli_issue_iocb is used by other functions in the driver - * to issue an iocb command to the HBA. If the PCI slot is recovering - * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT - * flag is turned on, the function returns IOCB_ERROR. - * When the link is down, this function allows only iocbs for - * posting buffers. - * This function finds next available slot in the command ring and - * posts the command to the available slot and writes the port - * attention register to request HBA start processing new iocb. 
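Worth noting the shape this gives the driver: lpfc_sli_issue_mbox is now a one-line trampoline through a function pointer installed once per device group, so SLI-3 and SLI-4 differ only in which routines the table names. The same idea rendered as a compact ops structure (a sketch of the concept; lpfc actually stores the pointers directly in struct lpfc_hba rather than in a separate ops table):

struct sli_ops {
	int (*issue_mbox)(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
};

static const struct sli_ops sli3_ops = { .issue_mbox = lpfc_sli_issue_mbox_s3 };
static const struct sli_ops sli4_ops = { .issue_mbox = lpfc_sli_issue_mbox_s4 };

/* Chosen once at probe time; hot paths dispatch without branching. */
ops = (dev_grp == LPFC_PCI_DEV_OC) ? &sli4_ops : &sli3_ops;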
- * If there is no slot available in the ring and - * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the - * txq, otherwise the function returns IOCB_BUSY. - * - * This function is called with hbalock held. - * The function will return success after it successfully submit the - * iocb to firmware or after adding to the txq. + * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue + * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is + * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT + * flag is turned on, the function returns IOCB_ERROR. When the link is down, + * this function allows only iocbs for posting buffers. This function finds + * next available slot in the command ring and posts the command to the + * available slot and writes the port attention register to request HBA start + * processing new iocb. If there is no slot available in the ring and + * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise + * the function returns IOCB_BUSY. + * + * This function is called with hbalock held. The function will return success + * after it successfully submit the iocb to firmware or after adding to the + * txq. **/ static int -__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, +__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_iocbq *nextiocb; IOCB_t *iocb; + struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; if (piocb->iocb_cmpl && (!piocb->vport) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && @@ -3833,6 +5619,498 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, return IOCB_BUSY; } +/** + * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. + * @phba: Pointer to HBA context object. + * @piocb: Pointer to command iocb. + * @sglq: Pointer to the scatter gather queue object. + * + * This routine converts the bpl or bde that is in the IOCB + * to a sgl list for the sli4 hardware. The physical address + * of the bpl/bde is converted back to a virtual address. + * If the IOCB contains a BPL then the list of BDE's is + * converted to sli4_sge's. If the IOCB contains a single + * BDE then it is converted to a single sli_sge. + * The IOCB is still in cpu endianess so the contents of + * the bpl can be used without byte swapping. + * + * Returns valid XRI = Success, NO_XRI = Failure. +**/ +static uint16_t +lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, + struct lpfc_sglq *sglq) +{ + uint16_t xritag = NO_XRI; + struct ulp_bde64 *bpl = NULL; + struct ulp_bde64 bde; + struct sli4_sge *sgl = NULL; + IOCB_t *icmd; + int numBdes = 0; + int i = 0; + + if (!piocbq || !sglq) + return xritag; + + sgl = (struct sli4_sge *)sglq->sgl; + icmd = &piocbq->iocb; + if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { + numBdes = icmd->un.genreq64.bdl.bdeSize / + sizeof(struct ulp_bde64); + /* The addrHigh and addrLow fields within the IOCB + * have not been byteswapped yet so there is no + * need to swap them back. + */ + bpl = (struct ulp_bde64 *) + ((struct lpfc_dmabuf *)piocbq->context3)->virt; + + if (!bpl) + return xritag; + + for (i = 0; i < numBdes; i++) { + /* Should already be byte swapped. */ + sgl->addr_hi = bpl->addrHigh; + sgl->addr_lo = bpl->addrLow; + /* swap the size field back to the cpu so we + * can assign it to the sgl. 
+ */ + bde.tus.w = le32_to_cpu(bpl->tus.w); + bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize); + if ((i+1) == numBdes) + bf_set(lpfc_sli4_sge_last, sgl, 1); + else + bf_set(lpfc_sli4_sge_last, sgl, 0); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + bpl++; + sgl++; + } + } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { + /* The addrHigh and addrLow fields of the BDE have not + * been byteswapped yet so they need to be swapped + * before putting them in the sgl. + */ + sgl->addr_hi = + cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); + sgl->addr_lo = + cpu_to_le32(icmd->un.genreq64.bdl.addrLow); + bf_set(lpfc_sli4_sge_len, sgl, + icmd->un.genreq64.bdl.bdeSize); + bf_set(lpfc_sli4_sge_last, sgl, 1); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->word3 = cpu_to_le32(sgl->word3); + } + return sglq->sli4_xritag; +} + +/** + * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution + * @phba: Pointer to HBA context object. + * @piocb: Pointer to command iocb. + * + * This routine performs a round robin SCSI command to SLI4 FCP WQ index + * distribution. + * + * Return: index into SLI4 fast-path FCP queue index. + **/ +static uint32_t +lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) +{ + static uint32_t fcp_qidx; + + return fcp_qidx++ % phba->cfg_fcp_wq_count; +} + +/** + * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. + * @phba: Pointer to HBA context object. + * @piocb: Pointer to command iocb. + * @wqe: Pointer to the work queue entry. + * + * This routine converts the iocb command to its Work Queue Entry + * equivalent. The wqe pointer should not have any fields set when + * this routine is called because it will memcpy over them. + * This routine does not set the CQ_ID or the WQEC bits in the + * wqe. + * + * Returns: 0 = Success, IOCB_ERROR = Failure. + **/ +static int +lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, + union lpfc_wqe *wqe) +{ + uint32_t payload_len = 0; + uint8_t ct = 0; + uint32_t fip; + uint32_t abort_tag; + uint8_t command_type = ELS_COMMAND_NON_FIP; + uint8_t cmnd; + uint16_t xritag; + struct ulp_bde64 *bpl = NULL; + + fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); + /* The fcp commands will set command type */ + if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip)) + command_type = ELS_COMMAND_NON_FIP; + else if (!(iocbq->iocb_flag & LPFC_IO_FCP)) + command_type = ELS_COMMAND_FIP; + else if (iocbq->iocb_flag & LPFC_IO_FCP) + command_type = FCP_COMMAND; + else { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2019 Invalid cmd 0x%x\n", + iocbq->iocb.ulpCommand); + return IOCB_ERROR; + } + /* Some of the fields are in the right position already */ + memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); + abort_tag = (uint32_t) iocbq->iotag; + xritag = iocbq->sli4_xritag; + wqe->words[7] = 0; /* The ct field has moved so reset */ + /* words0-2 bpl convert bde */ + if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { + bpl = (struct ulp_bde64 *) + ((struct lpfc_dmabuf *)iocbq->context3)->virt; + if (!bpl) + return IOCB_ERROR; + + /* Should already be byte swapped. */ + wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); + wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); + /* swap the size field back to the cpu so we + * can assign it to the sgl. 
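The work-queue picker above is deliberately lock-free: a static counter taken modulo cfg_fcp_wq_count. Concurrent callers can race on the increment, but the worst outcome is a slightly uneven spread; the modulo keeps every result in range. Its essence:

static unsigned int next_wq;		/* torn updates only skew fairness */

static inline unsigned int pick_wq(unsigned int wq_count)
{
	return next_wq++ % wq_count;	/* round robin, always in range */
}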
+ */ + wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); + payload_len = wqe->generic.bde.tus.f.bdeSize; + } else + payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; + + iocbq->iocb.ulpIoTag = iocbq->iotag; + cmnd = iocbq->iocb.ulpCommand; + + switch (iocbq->iocb.ulpCommand) { + case CMD_ELS_REQUEST64_CR: + if (!iocbq->iocb.ulpLe) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2007 Only Limited Edition cmd Format" + " supported 0x%x\n", + iocbq->iocb.ulpCommand); + return IOCB_ERROR; + } + wqe->els_req.payload_len = payload_len; + /* Els_reguest64 has a TMO */ + bf_set(wqe_tmo, &wqe->els_req.wqe_com, + iocbq->iocb.ulpTimeout); + /* Need a VF for word 4 set the vf bit*/ + bf_set(els_req64_vf, &wqe->els_req, 0); + /* And a VFID for word 12 */ + bf_set(els_req64_vfid, &wqe->els_req, 0); + /* + * Set ct field to 3, indicates that the context_tag field + * contains the FCFI and remote N_Port_ID is + * in word 5. + */ + + ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); + bf_set(lpfc_wqe_gen_context, &wqe->generic, + iocbq->iocb.ulpContext); + + if (iocbq->vport->fc_myDID != 0) { + bf_set(els_req64_sid, &wqe->els_req, + iocbq->vport->fc_myDID); + bf_set(els_req64_sp, &wqe->els_req, 1); + } + bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); + bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); + /* CCP CCPE PV PRI in word10 were set in the memcpy */ + break; + case CMD_XMIT_SEQUENCE64_CR: + /* word3 iocb=io_tag32 wqe=payload_offset */ + /* payload offset used for multilpe outstanding + * sequences on the same exchange + */ + wqe->words[3] = 0; + /* word4 relative_offset memcpy */ + /* word5 r_ctl/df_ctl memcpy */ + bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); + wqe->xmit_sequence.xmit_len = payload_len; + break; + case CMD_XMIT_BCAST64_CN: + /* word3 iocb=iotag32 wqe=payload_len */ + wqe->words[3] = 0; /* no definition for this in wqe */ + /* word4 iocb=rsvd wqe=rsvd */ + /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ + /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ + bf_set(lpfc_wqe_gen_ct, &wqe->generic, + ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); + break; + case CMD_FCP_IWRITE64_CR: + command_type = FCP_COMMAND_DATA_OUT; + /* The struct for wqe fcp_iwrite has 3 fields that are somewhat + * confusing. + * word3 is payload_len: byte offset to the sgl entry for the + * fcp_command. + * word4 is total xfer len, same as the IOCB->ulpParameter. + * word5 is initial xfer len 0 = wait for xfer-ready + */ + + /* Always wait for xfer-ready before sending data */ + wqe->fcp_iwrite.initial_xfer_len = 0; + /* word 4 (xfer length) should have been set on the memcpy */ + + /* allow write to fall through to read */ + case CMD_FCP_IREAD64_CR: + /* FCP_CMD is always the 1st sgl entry */ + wqe->fcp_iread.payload_len = + payload_len + sizeof(struct fcp_rsp); + + /* word 4 (xfer length) should have been set on the memcpy */ + + bf_set(lpfc_wqe_gen_erp, &wqe->generic, + iocbq->iocb.ulpFCP2Rcvy); + bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS); + /* The XC bit and the XS bit are similar. The driver never + * tracked whether or not the exchange was previouslly open. + * XC = Exchange create, 0 is create. 1 is already open. + * XS = link cmd: 1 do not close the exchange after command. + * XS = 0 close exchange when command completes. + * The only time we would not set the XC bit is when the XS bit + * is set and we are sending our 2nd or greater command on + * this exchange. 
+ */ + + /* ALLOW read & write to fall through to ICMD64 */ + case CMD_FCP_ICMND64_CR: + /* Always open the exchange */ + bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); + + wqe->words[10] &= 0xffff0000; /* zero out ebde count */ + bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); + break; + case CMD_GEN_REQUEST64_CR: + /* word3 command length is described as byte offset to the + * rsp_data. Would always be 16, sizeof(struct sli4_sge) + * sgl[0] = cmnd + * sgl[1] = rsp. + * + */ + wqe->gen_req.command_len = payload_len; + /* Word4 parameter copied in the memcpy */ + /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ + /* word6 context tag copied in memcpy */ + if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { + ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2015 Invalid CT %x command 0x%x\n", + ct, iocbq->iocb.ulpCommand); + return IOCB_ERROR; + } + bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0); + bf_set(wqe_tmo, &wqe->gen_req.wqe_com, + iocbq->iocb.ulpTimeout); + + bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); + command_type = OTHER_COMMAND; + break; + case CMD_XMIT_ELS_RSP64_CX: + /* words0-2 BDE memcpy */ + /* word3 iocb=iotag32 wqe=rsvd */ + wqe->words[3] = 0; + /* word4 iocb=did wqe=rsvd. */ + wqe->words[4] = 0; + /* word5 iocb=rsvd wqe=did */ + bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, + iocbq->iocb.un.elsreq64.remoteID); + + bf_set(lpfc_wqe_gen_ct, &wqe->generic, + ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); + + bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); + bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); + if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) + bf_set(lpfc_wqe_gen_context, &wqe->generic, + iocbq->vport->vpi + phba->vpi_base); + command_type = OTHER_COMMAND; + break; + case CMD_CLOSE_XRI_CN: + case CMD_ABORT_XRI_CN: + case CMD_ABORT_XRI_CX: + /* words 0-2 memcpy should be 0 reserved */ + /* port will send abts */ + if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) + /* + * The link is down so the fw does not need to send abts + * on the wire. + */ + bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); + else + bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); + bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); + abort_tag = iocbq->iocb.un.acxri.abortIoTag; + wqe->words[5] = 0; + bf_set(lpfc_wqe_gen_ct, &wqe->generic, + ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); + abort_tag = iocbq->iocb.un.acxri.abortIoTag; + wqe->generic.abort_tag = abort_tag; + /* + * The abort handler will send us CMD_ABORT_XRI_CN or + * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX + */ + bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX); + cmnd = CMD_ABORT_XRI_CX; + command_type = OTHER_COMMAND; + xritag = 0; + break; + case CMD_XRI_ABORTED_CX: + case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ + /* words0-2 are all 0's no bde */ + /* word3 and word4 are reserved */ + wqe->words[3] = 0; + wqe->words[4] = 0; + /* word5 iocb=rsvd wqe=did */ + /* There is no remote port id in the IOCB?
*/ + /* Let this fall through and fail */ + case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ + case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ + case CMD_FCP_TRSP64_CX: /* Target mode rcv */ + case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2014 Invalid command 0x%x\n", + iocbq->iocb.ulpCommand); + return IOCB_ERROR; + break; + + } + bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag); + bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag); + wqe->generic.abort_tag = abort_tag; + bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type); + bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd); + bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass); + bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT); + + return 0; +} + +/** + * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb + * @phba: Pointer to HBA context object. + * @ring_number: SLI ring number to issue iocb on. + * @piocb: Pointer to command iocb. + * @flag: Flag indicating if this command can be put into txq. + * + * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue + * an iocb command to an HBA with SLI-4 interface spec. + * + * This function is called with hbalock held. The function will return success + * after it successfully submits the iocb to firmware or after adding it to the + * txq. + **/ +static int +__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + struct lpfc_sglq *sglq; + uint16_t xritag; + union lpfc_wqe wqe; + struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; + uint32_t fcp_wqidx; + + if (piocb->sli4_xritag == NO_XRI) { + if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || + piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) + sglq = NULL; + else { + sglq = __lpfc_sli_get_sglq(phba); + if (!sglq) + return IOCB_ERROR; + piocb->sli4_xritag = sglq->sli4_xritag; + } + } else if (piocb->iocb_flag & LPFC_IO_FCP) { + sglq = NULL; /* These IO's already have an XRI and + * a mapped sgl. + */ + } else { + /* This is a continuation of a command (CX), so this + * sglq is on the active list + */ + sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); + if (!sglq) + return IOCB_ERROR; + } + + if (sglq) { + xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq); + if (xritag != sglq->sli4_xritag) + return IOCB_ERROR; + } + + if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) + return IOCB_ERROR; + + if (piocb->iocb_flag & LPFC_IO_FCP) { + fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb); + if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) + return IOCB_ERROR; + } else { + if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) + return IOCB_ERROR; + } + lpfc_sli_ringtxcmpl_put(phba, pring, piocb); + + return 0; +} + +/** + * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb + * + * This routine wraps the actual lockless version for issuing the IOCB through + * the function pointer from the lpfc_hba struct. + * + * Return codes: + * IOCB_ERROR - Error + * IOCB_SUCCESS - Success + * IOCB_BUSY - Busy + **/ +static inline int +__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, + struct lpfc_iocbq *piocb, uint32_t flag) +{ + return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); +} + +/** + * lpfc_sli_api_table_setup - Set up sli api function jump table + * @phba: The hba struct for which this call is being executed. + * @dev_grp: The HBA PCI-Device group number.
+ * + * This routine sets up the SLI interface API function jump table in @phba + * struct. + * Returns: 0 - success, -ENODEV - failure. + **/ +int +lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) +{ + + switch (dev_grp) { + case LPFC_PCI_DEV_LP: + phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; + phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; + break; + case LPFC_PCI_DEV_OC: + phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; + phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1419 Invalid HBA PCI-device group: 0x%x\n", + dev_grp); + return -ENODEV; + break; + } + phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; + return 0; +} /** * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb @@ -3848,14 +6126,14 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * functions which do not hold hbalock. **/ int -lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, +lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { unsigned long iflags; int rc; spin_lock_irqsave(&phba->hbalock, iflags); - rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); + rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&phba->hbalock, iflags); return rc; @@ -4148,6 +6426,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) } /** + * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system + * @phba: Pointer to HBA context object. + * + * This routine flushes the mailbox command subsystem. It will unconditionally + * flush all the mailbox commands in the three possible stages in the mailbox + * command sub-system: pending mailbox command queue; the outstanding mailbox + * command; and completed mailbox command queue. It is caller's responsibility + * to make sure that the driver is in the proper state to flush the mailbox + * command sub-system. Namely, the posting of mailbox commands into the + * pending mailbox command queue from the various clients must be stopped; + * either the HBA is in a state that it will never works on the outstanding + * mailbox command (such as in EEH or ERATT conditions) or the outstanding + * mailbox command has been completed. + **/ +static void +lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) +{ + LIST_HEAD(completions); + struct lpfc_sli *psli = &phba->sli; + LPFC_MBOXQ_t *pmb; + unsigned long iflag; + + /* Flush all the mailbox commands in the mbox system */ + spin_lock_irqsave(&phba->hbalock, iflag); + /* The pending mailbox command queue */ + list_splice_init(&phba->sli.mboxq, &completions); + /* The outstanding active mailbox command */ + if (psli->mbox_active) { + list_add_tail(&psli->mbox_active->list, &completions); + psli->mbox_active = NULL; + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + } + /* The completed mailbox command queue */ + list_splice_init(&phba->sli.mboxq_cmpl, &completions); + spin_unlock_irqrestore(&phba->hbalock, iflag); + + /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ + while (!list_empty(&completions)) { + list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); + pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba, pmb); + } +} + +/** * lpfc_sli_host_down - Vport cleanup function * @vport: Pointer to virtual port object. 
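lpfc_sli_mbox_sys_flush above is the classic splice-then-complete idiom: steal the pending, active, and completed commands onto a private list inside one locked section, then run the completion callbacks with no lock held so they are free to log, sleep, or resubmit. In general form (struct cmd, lock and pending are placeholders):

LIST_HEAD(reap);

spin_lock_irqsave(&lock, flags);
list_splice_init(&pending, &reap);	/* empties 'pending' atomically */
spin_unlock_irqrestore(&lock, flags);

while (!list_empty(&reap)) {
	struct cmd *c = list_first_entry(&reap, struct cmd, list);

	list_del(&c->list);
	c->status = MBX_NOT_FINISHED;	/* mark as flushed */
	if (c->done)
		c->done(c);		/* callback runs lock-free */
}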
* @@ -4240,9 +6564,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *buf_ptr; - LPFC_MBOXQ_t *pmb; - int i; unsigned long flags = 0; + int i; + + /* Shutdown the mailbox command sub-system */ + lpfc_sli_mbox_sys_shutdown(phba); lpfc_hba_down_prep(phba); @@ -4287,28 +6613,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) /* Return any active mbox cmds */ del_timer_sync(&psli->mbox_tmo); - spin_lock_irqsave(&phba->hbalock, flags); - spin_lock(&phba->pport->work_port_lock); + spin_lock_irqsave(&phba->pport->work_port_lock, flags); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; - spin_unlock(&phba->pport->work_port_lock); + spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); - /* Return any pending or completed mbox cmds */ - list_splice_init(&phba->sli.mboxq, &completions); - if (psli->mbox_active) { - list_add_tail(&psli->mbox_active->list, &completions); - psli->mbox_active = NULL; - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - } - list_splice_init(&phba->sli.mboxq_cmpl, &completions); - spin_unlock_irqrestore(&phba->hbalock, flags); + return 1; +} + +/** + * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA + * @phba: Pointer to HBA context object. + * + * This function cleans up all queues, iocb, buffers, mailbox commands while + * shutting down the SLI4 HBA FCoE function. This function is called with no + * lock held and always returns 1. + * + * This function does the following to cleanup driver FCoE function resources: + * - Free discovery resources for each virtual port + * - Cleanup any pending fabric iocbs + * - Iterate through the iocb txq and free each entry in the list. + * - Free up any buffer posted to the HBA. + * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc. + * - Free mailbox commands in the mailbox queue. 
+ **/ +int +lpfc_sli4_hba_down(struct lpfc_hba *phba) +{ + /* Stop the SLI4 device port */ + lpfc_stop_port(phba); + + /* Tear down the queues in the HBA */ + lpfc_sli4_queue_unset(phba); + + /* unregister default FCFI from the HBA */ + lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); - while (!list_empty(&completions)) { - list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); - pmb->mb.mbxStatus = MBX_NOT_FINISHED; - if (pmb->mbox_cmpl) - pmb->mbox_cmpl(phba,pmb); - } return 1; } @@ -4639,7 +6979,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt = &abtsiocbp->iocb; iabt->un.acxri.abortType = ABORT_TYPE_ABTS; iabt->un.acxri.abortContextTag = icmd->ulpContext; - iabt->un.acxri.abortIoTag = icmd->ulpIoTag; + if (phba->sli_rev == LPFC_SLI_REV4) + iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; + else + iabt->un.acxri.abortIoTag = icmd->ulpIoTag; iabt->ulpLe = 1; iabt->ulpClass = icmd->ulpClass; @@ -4655,7 +6998,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, "abort cmd iotag x%x\n", iabt->un.acxri.abortContextTag, iabt->un.acxri.abortIoTag, abtsiocbp->iotag); - retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); + retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); if (retval) __lpfc_sli_release_iocbq(phba, abtsiocbp); @@ -4838,7 +7181,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, cmd = &iocbq->iocb; abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; - abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; + if (phba->sli_rev == LPFC_SLI_REV4) + abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; + else + abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpClass = cmd->ulpClass; abtsiocb->vport = phba->pport; @@ -4850,7 +7196,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, /* Setup callback routine and issue the command. */ abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; - ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); + ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, + abtsiocb, 0); if (ret_val == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); errcnt++; @@ -4931,7 +7278,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, **/ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, + uint32_t ring_number, struct lpfc_iocbq *piocb, struct lpfc_iocbq *prspiocbq, uint32_t timeout) @@ -4962,7 +7309,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, readl(phba->HCregaddr); /* flush */ } - retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); + retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0); if (retval == IOCB_SUCCESS) { timeout_req = timeout * HZ; timeleft = wait_event_timeout(done_q, @@ -5077,53 +7424,156 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, } /** - * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function + * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system * @phba: Pointer to HBA context. * - * This function is called to cleanup any pending mailbox - * objects in the driver queue before bringing the HBA offline. - * This function is called while resetting the HBA. - * The function is called without any lock held. The function - * takes hbalock to update SLI data structure. - * This function returns 1 when there is an active mailbox - * command pending else returns 0. 
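Both abort paths in the hunks above now pick the tag by SLI revision: an SLI-4 abort must address the exchange by its XRI, while SLI-3 keeps using the driver iotag. The selection, isolated from the surrounding setup:

/* SLI-4 identifies the exchange by XRI; SLI-3 by iotag. */
iabt->un.acxri.abortIoTag = (phba->sli_rev == LPFC_SLI_REV4) ?
				cmdiocb->sli4_xritag : icmd->ulpIoTag;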
+ * This function is called to shutdown the driver's mailbox sub-system. + * It first marks the mailbox sub-system as blocked to prevent + * asynchronous mailbox commands from being issued off the pending mailbox + * command queue. If the mailbox command sub-system shutdown is due to + * HBA error conditions such as EEH or ERATT, this routine shall invoke + * the mailbox sub-system flush routine to forcefully bring down the + * mailbox sub-system. Otherwise, if it is due to a normal condition (such + * as with offline or HBA function reset), this routine will wait for the + * outstanding mailbox command to complete before invoking the mailbox + * sub-system flush routine to gracefully bring down the mailbox sub-system. **/ -int -lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) +void +lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) { - struct lpfc_vport *vport = phba->pport; - int i = 0; - uint32_t ha_copy; + struct lpfc_sli *psli = &phba->sli; + uint8_t actcmd = MBX_HEARTBEAT; + unsigned long timeout; - while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { - if (i++ > LPFC_MBOX_TMO * 1000) - return 1; + spin_lock_irq(&phba->hbalock); + psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; + spin_unlock_irq(&phba->hbalock); - /* - * Call lpfc_sli_handle_mb_event only if a mailbox cmd - * did finish. This way we won't get the misleading - * "Stray Mailbox Interrupt" message. - */ + if (psli->sli_flag & LPFC_SLI_ACTIVE) { spin_lock_irq(&phba->hbalock); - ha_copy = phba->work_ha; - phba->work_ha &= ~HA_MBATT; + if (phba->sli.mbox_active) + actcmd = phba->sli.mbox_active->u.mb.mbxCommand; spin_unlock_irq(&phba->hbalock); + /* Determine how long we might wait for the active mailbox + * command to be gracefully completed by firmware. + */ + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * + 1000) + jiffies; + while (phba->sli.mbox_active) { + /* Check active mailbox complete status every 2ms */ + msleep(2); + if (time_after(jiffies, timeout)) + /* Timeout; let the mailbox flush routine + * forcefully release the active mailbox command + */ + break; + } + } + lpfc_sli_mbox_sys_flush(phba); +} + +/** + * lpfc_sli_eratt_read - read sli-3 error attention events + * @phba: Pointer to HBA context. + * + * This function is called to read the SLI3 device error attention registers + * for possible error attention events. The caller must hold the hbalock + * with spin_lock_irq(). + * + * This function returns 1 when there is Error Attention in the Host Attention + * Register and returns 0 otherwise.
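+ * + * A sketch of the expected calling pattern, with the hbalock held as the + * lpfc_sli_check_eratt() caller below holds it (illustrative only, not + * part of this patch): + * + * spin_lock_irq(&phba->hbalock); + * ha_copy = lpfc_sli_eratt_read(phba); + * spin_unlock_irq(&phba->hbalock); + * if (ha_copy) + * lpfc_worker_wake_up(phba);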
+ **/ +static int +lpfc_sli_eratt_read(struct lpfc_hba *phba) +{ + uint32_t ha_copy; - if (ha_copy & HA_MBATT) - if (lpfc_sli_handle_mb_event(phba) == 0) - i = 0; + /* Read chip Host Attention (HA) register */ + ha_copy = readl(phba->HAregaddr); + if (ha_copy & HA_ERATT) { + /* Read host status register to retrieve error event */ + lpfc_sli_read_hs(phba); + + /* Check if a deferred error condition is active */ + if ((HS_FFER1 & phba->work_hs) && + ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | + HS_FFER6 | HS_FFER7) & phba->work_hs)) { + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= DEFER_ERATT; + spin_unlock_irq(&phba->hbalock); + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); + } - msleep(1); + /* Set the driver HA work bitmap */ + spin_lock_irq(&phba->hbalock); + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + spin_unlock_irq(&phba->hbalock); + return 1; } + return 0; +} + +/** + * lpfc_sli4_eratt_read - read sli-4 error attention events + * @phba: Pointer to HBA context. + * + * This function is called to read the SLI4 device error attention registers + * for possible error attention events. The caller must hold the hbalock + * with spin_lock_irq(). + * + * This function returns 1 when there is Error Attention in the Host Attention + * Register and returns 0 otherwise. + **/ +static int +lpfc_sli4_eratt_read(struct lpfc_hba *phba) +{ + uint32_t uerr_sta_hi, uerr_sta_lo; + uint32_t onlnreg0, onlnreg1; - return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; + /* For now, use the SLI4 device internal unrecoverable error + * registers for error attention. This can be changed later. + */ + onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); + onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); + if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { + uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); + uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); + if (uerr_sta_lo || uerr_sta_hi) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1423 HBA Unrecoverable error: " + "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " + "online0_reg=0x%x, online1_reg=0x%x\n", + uerr_sta_lo, uerr_sta_hi, + onlnreg0, onlnreg1); + /* TEMP: as the driver error recovery logic is not + * fully developed, we just log the error message + * and the device error attention action is now + * temporarily disabled. + */ + return 0; + phba->work_status[0] = uerr_sta_lo; + phba->work_status[1] = uerr_sta_hi; + spin_lock_irq(&phba->hbalock); + /* Set the driver HA work bitmap */ + phba->work_ha |= HA_ERATT; + /* Indicate polling handles this ERATT */ + phba->hba_flag |= HBA_ERATT_HANDLED; + spin_unlock_irq(&phba->hbalock); + return 1; + } + } + return 0; } /** * lpfc_sli_check_eratt - check error attention events * @phba: Pointer to HBA context. * - * This function is called form timer soft interrupt context to check HBA's + * This function is called from timer soft interrupt context to check HBA's * error attention register bit for error attention events. * * This function returns 1 when there is Error Attention in the Host Attention @@ -5134,10 +7584,6 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) { uint32_t ha_copy; - /* If PCI channel is offline, don't process it */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return 0; - /* If somebody is waiting to handle an eratt, don't process it * here. The brdkill function will do this.
*/ @@ -5161,56 +7607,84 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) return 0; } - /* Read chip Host Attention (HA) register */ - ha_copy = readl(phba->HAregaddr); - if (ha_copy & HA_ERATT) { - /* Read host status register to retrieve error event */ - lpfc_sli_read_hs(phba); - - /* Check if there is a deferred error condition is active */ - if ((HS_FFER1 & phba->work_hs) && - ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | - HS_FFER6 | HS_FFER7) & phba->work_hs)) { - phba->hba_flag |= DEFER_ERATT; - /* Clear all interrupt enable conditions */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); - } - - /* Set the driver HA work bitmap */ - phba->work_ha |= HA_ERATT; - /* Indicate polling handles this ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; + /* If PCI channel is offline, don't process it */ + if (unlikely(pci_channel_offline(phba->pcidev))) { spin_unlock_irq(&phba->hbalock); - return 1; + return 0; + } + + switch (phba->sli_rev) { + case LPFC_SLI_REV2: + case LPFC_SLI_REV3: + /* Read chip Host Attention (HA) register */ + ha_copy = lpfc_sli_eratt_read(phba); + break; + case LPFC_SLI_REV4: + /* Read device Unrecoverable Error (UERR) registers */ + ha_copy = lpfc_sli4_eratt_read(phba); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0299 Invalid SLI revision (%d)\n", + phba->sli_rev); + ha_copy = 0; + break; } spin_unlock_irq(&phba->hbalock); + + return ha_copy; +} + +/** + * lpfc_intr_state_check - Check device state for interrupt handling + * @phba: Pointer to HBA context. + * + * This inline routine checks whether a device or its PCI slot is in a state + * in which the interrupt should be handled. + * + * This function returns 0 if the device or the PCI slot is in a state in + * which the interrupt should be handled, otherwise -EIO. + */ +static inline int +lpfc_intr_state_check(struct lpfc_hba *phba) +{ + /* If the pci channel is offline, ignore all the interrupts */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return -EIO; + + /* Update device level interrupt statistics */ + phba->sli.slistat.sli_intr++; + + /* Ignore all interrupts during initialization. */ + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + return -EIO; + return 0; } /** - * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver + * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt - * service routine when the device is enabled with MSI-X multi-message - * interrupt mode and there are slow-path events in the HBA. However, - * when the device is enabled with either MSI or Pin-IRQ interrupt mode, - * this function is called as part of the device-level interrupt handler. - * When the PCI slot is in error recovery or the HBA is undergoing - * initialization, the interrupt handler will not process the interrupt. - * The link attention and ELS ring attention events are handled by the - * worker thread. The interrupt handler signals the worker thread and - * and returns for these events. This function is called without any - * lock held. It gets the hbalock to access and update SLI data + * service routine when device with SLI-3 interface spec is enabled with + * MSI-X multi-message interrupt mode and there are slow-path events in + * the HBA. However, when the device is enabled with either MSI or Pin-IRQ + * interrupt mode, this function is called as part of the device-level + * interrupt handler.
When the PCI slot is in error recovery or the HBA + * is undergoing initialization, the interrupt handler will not process + * the interrupt. The link attention and ELS ring attention events are + * handled by the worker thread. The interrupt handler signals the worker + * thread and returns for these events. This function is called without + * any lock held. It gets the hbalock to access and update SLI data * structures. * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_sp_intr_handler(int irq, void *dev_id) +lpfc_sli_sp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; uint32_t ha_copy; @@ -5240,13 +7714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id) * individual interrupt handler in MSI-X multi-message interrupt mode */ if (phba->intr_type == MSIX) { - /* If the pci channel is offline, ignore all the interrupts */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return IRQ_NONE; - /* Update device-level interrupt statistics */ - phba->sli.slistat.sli_intr++; - /* Ignore all interrupts during initialization. */ - if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) return IRQ_NONE; /* Need to read HA REG for slow-path events */ spin_lock_irqsave(&phba->hbalock, iflag); @@ -5271,7 +7740,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) * interrupt. */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } @@ -5364,7 +7833,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id) if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { pmb = phba->sli.mbox_active; - pmbox = &pmb->mb; + pmbox = &pmb->u.mb; mbox = phba->mbox; vport = pmb->vport; @@ -5434,7 +7903,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id) LOG_MBOX | LOG_SLI, "0350 rc should have " "been MBX_BUSY"); - goto send_current_mbox; + if (rc != MBX_NOT_FINISHED) + goto send_current_mbox; } } spin_lock_irqsave( @@ -5471,29 +7941,29 @@ send_current_mbox: } return IRQ_HANDLED; -} /* lpfc_sp_intr_handler */ +} /* lpfc_sli_sp_intr_handler */ /** - * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver + * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt - * service routine when the device is enabled with MSI-X multi-message - * interrupt mode and there is a fast-path FCP IOCB ring event in the - * HBA. However, when the device is enabled with either MSI or Pin-IRQ - * interrupt mode, this function is called as part of the device-level - * interrupt handler. When the PCI slot is in error recovery or the HBA - * is undergoing initialization, the interrupt handler will not process - * the interrupt. The SCSI FCP fast-path ring event are handled in the - * intrrupt context. This function is called without any lock held. It - * gets the hbalock to access and update SLI data structures. + * service routine when device with SLI-3 interface spec is enabled with + * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB + * ring event in the HBA. However, when the device is enabled with either + * MSI or Pin-IRQ interrupt mode, this function is called as part of the + * device-level interrupt handler.
When the PCI slot is in error recovery + * or the HBA is undergoing initialization, the interrupt handler will not + * process the interrupt. The SCSI FCP fast-path ring events are handled in + * the interrupt context. This function is called without any lock held. + * It gets the hbalock to access and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_fp_intr_handler(int irq, void *dev_id) +lpfc_sli_fp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; uint32_t ha_copy; @@ -5513,13 +7983,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id) * individual interrupt handler in MSI-X multi-message interrupt mode */ if (phba->intr_type == MSIX) { - /* If pci channel is offline, ignore all the interrupts */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return IRQ_NONE; - /* Update device-level interrupt statistics */ - phba->sli.slistat.sli_intr++; - /* Ignore all interrupts during initialization. */ - if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) return IRQ_NONE; /* Need to read HA REG for FCP ring and other ring events */ ha_copy = readl(phba->HAregaddr); @@ -5530,7 +7995,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id) * any interrupt. */ if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), @@ -5566,26 +8031,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id) } } return IRQ_HANDLED; -} /* lpfc_fp_intr_handler */ +} /* lpfc_sli_fp_intr_handler */ /** - * lpfc_intr_handler - The device-level interrupt handler of lpfc driver + * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device * @irq: Interrupt number. * @dev_id: The device context pointer. * - * This function is the device-level interrupt handler called from the PCI - * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is - * an event in the HBA which requires driver attention. This function - * invokes the slow-path interrupt attention handling function and fast-path - * interrupt attention handling function in turn to process the relevant - * HBA attention events. This function is called without any lock held. It - * gets the hbalock to access and update SLI data structures. + * This function is the HBA device-level interrupt handler to device with + * SLI-3 interface spec, called from the PCI layer when either MSI or + * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which + * requires driver attention. This function invokes the slow-path interrupt + * attention handling function and fast-path interrupt attention handling + * function in turn to process the relevant HBA attention events. This + * function is called without any lock held. It gets the hbalock to access + * and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled, else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_intr_handler(int irq, void *dev_id) +lpfc_sli_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; irqreturn_t sp_irq_rc, fp_irq_rc; @@ -5600,15 +8066,8 @@ lpfc_intr_handler(int irq, void *dev_id) if (unlikely(!phba)) return IRQ_NONE; - /* If the pci channel is offline, ignore all the interrupts.
*/ - if (unlikely(pci_channel_offline(phba->pcidev))) - return IRQ_NONE; - - /* Update device level interrupt statistics */ - phba->sli.slistat.sli_intr++; - - /* Ignore all interrupts during initialization. */ - if (unlikely(phba->link_state < LPFC_LINK_DOWN)) + /* Check device state for handling interrupt */ + if (lpfc_intr_state_check(phba)) return IRQ_NONE; spin_lock(&phba->hbalock); @@ -5650,7 +8109,7 @@ lpfc_intr_handler(int irq, void *dev_id) status2 >>= (4*LPFC_ELS_RING); if (status1 || (status2 & HA_RXMASK)) - sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); + sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); else sp_irq_rc = IRQ_NONE; @@ -5670,10 +8129,3322 @@ lpfc_intr_handler(int irq, void *dev_id) status2 = 0; if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) - fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); + fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); else fp_irq_rc = IRQ_NONE; /* Return device-level interrupt handling status */ return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; -} /* lpfc_intr_handler */ +} /* lpfc_sli_intr_handler */ + +/** + * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 FCP abort XRI events. + **/ +void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + + /* First, declare the fcp xri abort event has been handled */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; + spin_unlock_irq(&phba->hbalock); + /* Now, handle all the fcp xri abort events */ + while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { + /* Get the first event from the head of the event queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + /* Notify aborted XRI for FCP work queue */ + lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); + /* Free the event processed back to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); + } +} + +/** + * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked by the worker thread to process all the pending + * SLI4 els abort xri events. 
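+ * + * Both abort-event routines are driven off hba_flag bits set when the CQE + * arrives; a worker-side dispatch would look like this sketch (the actual + * worker thread lives outside this hunk): + * + * if (phba->hba_flag & FCP_XRI_ABORT_EVENT) + * lpfc_sli4_fcp_xri_abort_event_proc(phba); + * if (phba->hba_flag & ELS_XRI_ABORT_EVENT) + * lpfc_sli4_els_xri_abort_event_proc(phba);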
+ **/ +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) +{ + struct lpfc_cq_event *cq_event; + + /* First, declare the els xri abort event has been handled */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; + spin_unlock_irq(&phba->hbalock); + /* Now, handle all the els xri abort events */ + while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { + /* Get the first event from the head of the event queue */ + spin_lock_irq(&phba->hbalock); + list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, + cq_event, struct lpfc_cq_event, list); + spin_unlock_irq(&phba->hbalock); + /* Notify aborted XRI for ELS work queue */ + lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); + /* Free the event processed back to the free pool */ + lpfc_sli4_cq_event_release(phba, cq_event); + } +} + +static void +lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, + struct lpfc_iocbq *pIocbOut, + struct lpfc_wcqe_complete *wcqe) +{ + size_t offset = offsetof(struct lpfc_iocbq, iocb); + + memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, + sizeof(struct lpfc_iocbq) - offset); + memset(&pIocbIn->sli4_info, 0, + sizeof(struct lpfc_sli4_rspiocb_info)); + /* Map WCQE parameters into irspiocb parameters */ + pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); + if (pIocbOut->iocb_flag & LPFC_IO_FCP) + if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) + pIocbIn->iocb.un.fcpi.fcpi_parm = + pIocbOut->iocb.un.fcpi.fcpi_parm - + wcqe->total_data_placed; + else + pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; + else + pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; + /* Load in additional WCQE parameters */ + pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); + pIocbIn->sli4_info.bfield = 0; + if (bf_get(lpfc_wcqe_c_xb, wcqe)) + pIocbIn->sli4_info.bfield |= LPFC_XB; + if (bf_get(lpfc_wcqe_c_pv, wcqe)) { + pIocbIn->sli4_info.bfield |= LPFC_PV; + pIocbIn->sli4_info.priority = + bf_get(lpfc_wcqe_c_priority, wcqe); + } +} + +/** + * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event + * @phba: Pointer to HBA context object. + * @mcqe: Pointer to mailbox completion queue entry. + * + * This routine processes a mailbox completion queue entry with an + * asynchronous event. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) +{ + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "0392 Async Event: word0:x%x, word1:x%x, " + "word2:x%x, word3:x%x\n", mcqe->word0, + mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); + + /* Allocate a new internal CQ_EVENT entry */ + cq_event = lpfc_sli4_cq_event_alloc(phba); + if (!cq_event) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0394 Failed to allocate CQ_EVENT entry\n"); + return false; + } + + /* Move the CQE into an asynchronous event entry */ + memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); + /* Set the async event flag */ + phba->hba_flag |= ASYNC_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + return true; +} + +/** + * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event + * @phba: Pointer to HBA context object. + * @mcqe: Pointer to mailbox completion queue entry.
+ * + * This routine processes a mailbox completion queue entry with a mailbox + * completion event. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) +{ + uint32_t mcqe_status; + MAILBOX_t *mbox, *pmbox; + struct lpfc_mqe *mqe; + struct lpfc_vport *vport; + struct lpfc_nodelist *ndlp; + struct lpfc_dmabuf *mp; + unsigned long iflags; + LPFC_MBOXQ_t *pmb; + bool workposted = false; + int rc; + + /* If not a mailbox-completion MCQE, bail out and only check whether + * the mailbox was consumed */ + if (!bf_get(lpfc_trailer_completed, mcqe)) + goto out_no_mqe_complete; + + /* Get the reference to the active mbox command */ + spin_lock_irqsave(&phba->hbalock, iflags); + pmb = phba->sli.mbox_active; + if (unlikely(!pmb)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "1832 No pending MBOX command to handle\n"); + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out_no_mqe_complete; + } + spin_unlock_irqrestore(&phba->hbalock, iflags); + mqe = &pmb->u.mqe; + pmbox = (MAILBOX_t *)&pmb->u.mqe; + mbox = phba->mbox; + vport = pmb->vport; + + /* Reset heartbeat timer */ + phba->last_completion_time = jiffies; + del_timer(&phba->sli.mbox_tmo); + + /* Move mbox data to caller's mailbox region, do endian swapping */ + if (pmb->mbox_cmpl && mbox) + lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); + /* Set the mailbox status with SLI4 range 0x4000 */ + mcqe_status = bf_get(lpfc_mcqe_status, mcqe); + if (mcqe_status != MB_CQE_STATUS_SUCCESS) + bf_set(lpfc_mqe_status, mqe, + (LPFC_MBX_ERROR_RANGE | mcqe_status)); + + if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { + pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, + "MBOX dflt rpi: status:x%x rpi:x%x", + mcqe_status, + pmbox->un.varWords[0], 0); + if (mcqe_status == MB_CQE_STATUS_SUCCESS) { + mp = (struct lpfc_dmabuf *)(pmb->context1); + ndlp = (struct lpfc_nodelist *)pmb->context2; + /* Reg_LOGIN of dflt RPI was successful. Now let's get + * rid of the RPI using the same mbox buffer.
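+ * The same pmb is reused for the UNREG_LOGIN below: its completion + * handler and context pointers are repointed before it is reposted + * with MBX_NOWAIT, so nothing is freed until that second command + * completes.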
+ */ + lpfc_unreg_login(phba, vport->vpi, + pmbox->un.varWords[0], pmb); + pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; + pmb->context1 = mp; + pmb->context2 = ndlp; + pmb->vport = vport; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc != MBX_BUSY) + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | + LOG_SLI, "0385 rc should " + "have been MBX_BUSY\n"); + if (rc != MBX_NOT_FINISHED) + goto send_current_mbox; + } + } + spin_lock_irqsave(&phba->pport->work_port_lock, iflags); + phba->pport->work_port_events &= ~WORKER_MBOX_TMO; + spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); + + /* There is mailbox completion work to do */ + spin_lock_irqsave(&phba->hbalock, iflags); + __lpfc_mbox_cmpl_put(phba, pmb); + phba->work_ha |= HA_MBATT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + +send_current_mbox: + spin_lock_irqsave(&phba->hbalock, iflags); + /* Release the mailbox command posting token */ + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + /* Clearing the active mailbox pointer must stay in sync with the + * flag clear */ + phba->sli.mbox_active = NULL; + spin_unlock_irqrestore(&phba->hbalock, iflags); + /* Wake up worker thread to post the next pending mailbox command */ + lpfc_worker_wake_up(phba); +out_no_mqe_complete: + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); + return workposted; +} + +/** + * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry + * @phba: Pointer to HBA context object. + * @cqe: Pointer to mailbox completion queue entry. + * + * This routine processes a mailbox completion queue entry; it invokes the + * proper mailbox-completion or asynchronous-event handling routine + * according to the MCQE's async bit. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) +{ + struct lpfc_mcqe mcqe; + bool workposted; + + /* Copy the mailbox MCQE and convert endian order as needed */ + lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); + + /* Invoke the proper event handling routine */ + if (!bf_get(lpfc_trailer_async, &mcqe)) + workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); + else + workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); + return workposted; +} + +/** + * lpfc_sli4_sp_handle_els_wcqe - Handle an ELS work-queue completion event + * @phba: Pointer to HBA context object. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles an ELS work-queue completion event. + * + * Return: true if work posted to worker thread, otherwise false.
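+ * + * The pseudo response IOCB is built by lpfc_sli4_iocb_param_transfer() + * above, which maps the WCQE status and parameter into the legacy IOCB + * fields so the existing SLI-3 style ELS completion path can consume the + * event unchanged; the worker thread is then signaled via HA_R0ATT.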
+ **/ +static bool +lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; + struct lpfc_iocbq *cmdiocbq; + struct lpfc_iocbq *irspiocbq; + unsigned long iflags; + bool workposted = false; + + spin_lock_irqsave(&phba->hbalock, iflags); + pring->stats.iocb_event++; + /* Look up the ELS command IOCB and create pseudo response IOCB */ + cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + if (unlikely(!cmdiocbq)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0386 ELS complete with no corresponding " + "cmdiocb: iotag (%d)\n", + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + return workposted; + } + + /* Fake the irspiocbq and copy necessary response information */ + irspiocbq = lpfc_sli_get_iocbq(phba); + if (!irspiocbq) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0387 Failed to allocate an iocbq\n"); + return workposted; + } + lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); + + /* Add the irspiocb to the response IOCB work list */ + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue); + /* Indicate ELS ring attention */ + phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + + return workposted; +} + +/** + * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event + * @phba: Pointer to HBA context object. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles a slow-path WQ entry consumed event by invoking the + * proper WQ release routine for the slow-path WQ. + **/ +static void +lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, + struct lpfc_wcqe_release *wcqe) +{ + /* Check for the slow-path ELS work queue */ + if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) + lpfc_sli4_wq_release(phba->sli4_hba.els_wq, + bf_get(lpfc_wcqe_r_wqe_index, wcqe)); + else + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2579 Slow-path wqe consume event carries " + "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", + bf_get(lpfc_wcqe_r_wqe_index, wcqe), + phba->sli4_hba.els_wq->queue_id); +} + +/** + * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event + * @phba: Pointer to HBA context object. + * @cq: Pointer to a WQ completion queue. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles an XRI abort event. + * + * Return: true if work posted to worker thread, otherwise false.
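+ * + * The CQE is copied into a driver-private lpfc_cq_event and queued by + * cq->subtype; the matching worker routine above later drains it, e.g. for + * the FCP case (sketch of the drain side): + * + * lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); + * lpfc_sli4_cq_event_release(phba, cq_event);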
+ **/ +static bool +lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, + struct lpfc_queue *cq, + struct sli4_wcqe_xri_aborted *wcqe) +{ + bool workposted = false; + struct lpfc_cq_event *cq_event; + unsigned long iflags; + + /* Allocate a new internal CQ_EVENT entry */ + cq_event = lpfc_sli4_cq_event_alloc(phba); + if (!cq_event) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0602 Failed to allocate CQ_EVENT entry\n"); + return false; + } + + /* Move the CQE into the proper xri abort event list */ + memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); + switch (cq->subtype) { + case LPFC_FCP: + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); + /* Set the fcp xri abort event flag */ + phba->hba_flag |= FCP_XRI_ABORT_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + case LPFC_ELS: + spin_lock_irqsave(&phba->hbalock, iflags); + list_add_tail(&cq_event->list, + &phba->sli4_hba.sp_els_xri_aborted_work_queue); + /* Set the els xri abort event flag */ + phba->hba_flag |= ELS_XRI_ABORT_EVENT; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0603 Invalid work queue CQE subtype (x%x)\n", + cq->subtype); + workposted = false; + break; + } + return workposted; +} + +/** + * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry + * @phba: Pointer to HBA context object. + * @cq: Pointer to the completion queue. + * @cqe: Pointer to a completion queue entry. + * + * This routine processes a slow-path work-queue completion queue entry. + * + * Return: true if work posted to worker thread, otherwise false. + **/ +static bool +lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + struct lpfc_wcqe_complete wcqe; + bool workposted = false; + + /* Copy the work queue CQE and convert endian order if needed */ + lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); + + /* Check the different types of WCQE and dispatch */ + switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { + case CQE_CODE_COMPL_WQE: + /* Process the WQ complete event */ + workposted = lpfc_sli4_sp_handle_els_wcqe(phba, + (struct lpfc_wcqe_complete *)&wcqe); + break; + case CQE_CODE_RELEASE_WQE: + /* Process the WQ release event */ + lpfc_sli4_sp_handle_rel_wcqe(phba, + (struct lpfc_wcqe_release *)&wcqe); + break; + case CQE_CODE_XRI_ABORTED: + /* Process the WQ XRI abort event */ + workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, + (struct sli4_wcqe_xri_aborted *)&wcqe); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0388 Not a valid WCQE code: x%x\n", + bf_get(lpfc_wcqe_c_code, &wcqe)); + break; + } + return workposted; +} + +/** + * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry + * @phba: Pointer to HBA context object. + * @cqe: Pointer to receive-queue completion queue entry. + * + * This routine processes a receive-queue completion queue entry. + * + * Return: true if work posted to worker thread, otherwise false.
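+ * + * On FC_STATUS_RQ_SUCCESS the frame buffer is parked for the worker thread + * (sketch of the handshake only; the worker-side drain is outside this + * hunk): + * + * list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); + * phba->hba_flag |= HBA_RECEIVE_BUFFER;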
+ **/ +static bool +lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) +{ + struct lpfc_rcqe rcqe; + bool workposted = false; + struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; + struct lpfc_queue *drq = phba->sli4_hba.dat_rq; + struct hbq_dmabuf *dma_buf; + uint32_t status; + unsigned long iflags; + + /* Copy the receive queue CQE and convert endian order if needed */ + lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe)); + lpfc_sli4_rq_release(hrq, drq); + if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE) + goto out; + if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id) + goto out; + + status = bf_get(lpfc_rcqe_status, &rcqe); + switch (status) { + case FC_STATUS_RQ_BUF_LEN_EXCEEDED: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2537 Receive Frame Truncated!!\n"); + /* Fall through - a truncated frame is still queued */ + case FC_STATUS_RQ_SUCCESS: + spin_lock_irqsave(&phba->hbalock, iflags); + dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); + if (!dma_buf) { + spin_unlock_irqrestore(&phba->hbalock, iflags); + goto out; + } + memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); + /* save off the frame for the worker thread to process */ + list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); + /* Frame received */ + phba->hba_flag |= HBA_RECEIVE_BUFFER; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + case FC_STATUS_INSUFF_BUF_NEED_BUF: + case FC_STATUS_INSUFF_BUF_FRM_DISC: + /* Post more buffers if possible */ + spin_lock_irqsave(&phba->hbalock, iflags); + phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; + spin_unlock_irqrestore(&phba->hbalock, iflags); + workposted = true; + break; + } +out: + return workposted; + +} + +/** + * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry + * @phba: Pointer to HBA context object. + * @eqe: Pointer to slow-path event queue entry. + * + * This routine processes an event queue entry from the slow-path event queue. + * It checks the MajorCode and MinorCode to determine whether this is a + * completion event on a completion queue; if not, an error is logged and + * the routine returns. Otherwise, it gets the corresponding completion + * queue, processes all the entries on that completion queue, rearms the + * completion queue, and then returns.
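+ * + * Every CQ type is drained with the same bounded-release pattern, releasing + * consumed entries in LPFC_GET_QE_REL_INT batches and rearming once at the + * end (handle_one below is a stand-in for the per-type handler): + * + * while ((cqe = lpfc_sli4_cq_get(cq))) { + * workposted |= handle_one(phba, cqe); + * if (!(++ecount % LPFC_GET_QE_REL_INT)) + * lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + * } + * lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);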
+ * + **/ +static void +lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) +{ + struct lpfc_queue *cq = NULL, *childq, *speq; + struct lpfc_cqe *cqe; + bool workposted = false; + int ecount = 0; + uint16_t cqid; + + if (bf_get(lpfc_eqe_major_code, eqe) != 0 || + bf_get(lpfc_eqe_minor_code, eqe) != 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0359 Not a valid slow-path completion " + "event: majorcode=x%x, minorcode=x%x\n", + bf_get(lpfc_eqe_major_code, eqe), + bf_get(lpfc_eqe_minor_code, eqe)); + return; + } + + /* Get the reference to the corresponding CQ */ + cqid = bf_get(lpfc_eqe_resource_id, eqe); + + /* Search for completion queue pointer matching this cqid */ + speq = phba->sli4_hba.sp_eq; + list_for_each_entry(childq, &speq->child_list, list) { + if (childq->queue_id == cqid) { + cq = childq; + break; + } + } + if (unlikely(!cq)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0365 Slow-path CQ identifier (%d) does " + "not exist\n", cqid); + return; + } + + /* Process all the entries on the CQ */ + switch (cq->type) { + case LPFC_MCQ: + while ((cqe = lpfc_sli4_cq_get(cq))) { + workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); + if (!(++ecount % LPFC_GET_QE_REL_INT)) + lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + } + break; + case LPFC_WCQ: + while ((cqe = lpfc_sli4_cq_get(cq))) { + workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); + if (!(++ecount % LPFC_GET_QE_REL_INT)) + lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + } + break; + case LPFC_RCQ: + while ((cqe = lpfc_sli4_cq_get(cq))) { + workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe); + if (!(++ecount % LPFC_GET_QE_REL_INT)) + lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + } + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0370 Invalid completion queue type (%d)\n", + cq->type); + return; + } + + /* Catch the no cq entry condition, log an error */ + if (unlikely(ecount == 0)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0371 No entry from the CQ: identifier " + "(x%x), type (%d)\n", cq->queue_id, cq->type); + + /* In any case, flush and re-arm the CQ */ + lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); + + /* wake up worker thread if there is work to be done */ + if (workposted) + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry + * @phba: Pointer to HBA context object. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine processes a fast-path work queue completion entry from a + * fast-path event queue for FCP command response completion. + **/ +static void +lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, + struct lpfc_wcqe_complete *wcqe) +{ + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; + struct lpfc_iocbq *cmdiocbq; + struct lpfc_iocbq irspiocbq; + unsigned long iflags; + + spin_lock_irqsave(&phba->hbalock, iflags); + pring->stats.iocb_event++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Check for response status */ + if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { + /* If resource errors reported from HBA, reduce queue + * depth of the SCSI device.
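+ * Only IOSTAT_LOCAL_REJECT with IOERR_NO_RESOURCES triggers the + * rampdown callback; all other non-zero statuses are just logged.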
+ */ + if ((bf_get(lpfc_wcqe_c_status, wcqe) == + IOSTAT_LOCAL_REJECT) && + (wcqe->parameter == IOERR_NO_RESOURCES)) { + phba->lpfc_rampdown_queue_depth(phba); + } + /* Log the error status */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0373 FCP complete error: status=x%x, " + "hw_status=x%x, total_data_specified=%d, " + "parameter=x%x, word3=x%x\n", + bf_get(lpfc_wcqe_c_status, wcqe), + bf_get(lpfc_wcqe_c_hw_status, wcqe), + wcqe->total_data_placed, wcqe->parameter, + wcqe->word3); + } + + /* Look up the FCP command IOCB and create pseudo response IOCB */ + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + spin_unlock_irqrestore(&phba->hbalock, iflags); + if (unlikely(!cmdiocbq)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0374 FCP complete with no corresponding " + "cmdiocb: iotag (%d)\n", + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + return; + } + if (unlikely(!cmdiocbq->iocb_cmpl)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0375 FCP cmdiocb not callback function " + "iotag: (%d)\n", + bf_get(lpfc_wcqe_c_request_tag, wcqe)); + return; + } + + /* Fake the irspiocb and copy necessary response information */ + lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe); + + /* Pass the cmd_iocb and the rsp state to the upper layer */ + (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); +} + +/** + * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event + * @phba: Pointer to HBA context object. + * @cq: Pointer to completion queue. + * @wcqe: Pointer to work-queue completion queue entry. + * + * This routine handles a fast-path WQ entry consumed event by invoking the + * proper WQ release routine for the fast-path WQ. + **/ +static void +lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_wcqe_release *wcqe) +{ + struct lpfc_queue *childwq; + bool wqid_matched = false; + uint16_t fcp_wqid; + + /* Check for fast-path FCP work queue release */ + fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); + list_for_each_entry(childwq, &cq->child_list, list) { + if (childwq->queue_id == fcp_wqid) { + lpfc_sli4_wq_release(childwq, + bf_get(lpfc_wcqe_r_wqe_index, wcqe)); + wqid_matched = true; + break; + } + } + /* Report warning log message if no match found */ + if (wqid_matched != true) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2580 Fast-path wqe consume event carries " + "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); +} + +/** + * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry + * @phba: Pointer to HBA context object. + * @cq: Pointer to the completion queue. + * @cqe: Pointer to a completion queue entry. + * + * This routine processes a fast-path work queue completion entry from a + * fast-path event queue for FCP command response completion.
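+ * + * Dispatch on lpfc_wcqe_c_code mirrors the slow-path handler above: COMPL + * completes an FCP command, RELEASE credits the child WQ, and XRI_ABORTED + * is queued for the worker thread via the shared abort handler.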
+ **/ +static int +lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_cqe *cqe) +{ + struct lpfc_wcqe_release wcqe; + bool workposted = false; + + /* Copy the work queue CQE and convert endian order if needed */ + lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); + + /* Check the different types of WCQE and dispatch */ + switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { + case CQE_CODE_COMPL_WQE: + /* Process the WQ complete event */ + lpfc_sli4_fp_handle_fcp_wcqe(phba, + (struct lpfc_wcqe_complete *)&wcqe); + break; + case CQE_CODE_RELEASE_WQE: + /* Process the WQ release event */ + lpfc_sli4_fp_handle_rel_wcqe(phba, cq, + (struct lpfc_wcqe_release *)&wcqe); + break; + case CQE_CODE_XRI_ABORTED: + /* Process the WQ XRI abort event */ + workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, + (struct sli4_wcqe_xri_aborted *)&wcqe); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0144 Not a valid WCQE code: x%x\n", + bf_get(lpfc_wcqe_c_code, &wcqe)); + break; + } + return workposted; +} + +/** + * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry + * @phba: Pointer to HBA context object. + * @eqe: Pointer to fast-path event queue entry. + * + * This routine processes an event queue entry from the fast-path event queue. + * It checks the MajorCode and MinorCode to determine whether this is a + * completion event on a completion queue; if not, an error is logged and + * the routine returns. Otherwise, it gets the corresponding completion + * queue, processes all the entries on the completion queue, rearms the + * completion queue, and then returns. + **/ +static void +lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, + uint32_t fcp_cqidx) +{ + struct lpfc_queue *cq; + struct lpfc_cqe *cqe; + bool workposted = false; + uint16_t cqid; + int ecount = 0; + + if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) || + unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0366 Not a valid fast-path completion " + "event: majorcode=x%x, minorcode=x%x\n", + bf_get(lpfc_eqe_major_code, eqe), + bf_get(lpfc_eqe_minor_code, eqe)); + return; + } + + cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; + if (unlikely(!cq)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0367 Fast-path completion queue does not " + "exist\n"); + return; + } + + /* Get the reference to the corresponding CQ */ + cqid = bf_get(lpfc_eqe_resource_id, eqe); + if (unlikely(cqid != cq->queue_id)) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0368 Miss-matched fast-path completion " + "queue identifier: eqcqid=%d, fcpcqid=%d\n", + cqid, cq->queue_id); + return; + } + + /* Process all the entries on the CQ */ + while ((cqe = lpfc_sli4_cq_get(cq))) { + workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); + if (!(++ecount % LPFC_GET_QE_REL_INT)) + lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); + } + + /* Catch the no cq entry condition */ + if (unlikely(ecount == 0)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0369 No entry from fast-path completion " + "queue fcpcqid=%d\n", cq->queue_id); + + /* In any case, flush and re-arm the CQ */ + lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); + + /* wake up worker thread if there is work to be done */ + if (workposted) + lpfc_worker_wake_up(phba); +} + +static void +lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) +{ + struct lpfc_eqe *eqe; + + /* walk all the EQ entries and drop on the floor */ + while ((eqe = lpfc_sli4_eq_get(eq))) + ; + + /* Clear
and re-arm the EQ */ + lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); +} + +/** + * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is directly called from the PCI layer as an interrupt + * service routine when device with SLI-4 interface spec is enabled with + * MSI-X multi-message interrupt mode and there are slow-path events in + * the HBA. However, when the device is enabled with either MSI or Pin-IRQ + * interrupt mode, this function is called as part of the device-level + * interrupt handler. When the PCI slot is in error recovery or the HBA is + * undergoing initialization, the interrupt handler will not process the + * interrupt. The link attention and ELS ring attention events are handled + * by the worker thread. The interrupt handler signals the worker thread + * and returns for these events. This function is called without any lock + * held. It gets the hbalock to access and update SLI data structures. + * + * This function returns IRQ_HANDLED when interrupt is handled else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli4_sp_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + struct lpfc_queue *speq; + struct lpfc_eqe *eqe; + unsigned long iflag; + int ecount = 0; + + /* + * Get the driver's phba structure from the dev_id + */ + phba = (struct lpfc_hba *)dev_id; + + if (unlikely(!phba)) + return IRQ_NONE; + + /* Get to the EQ struct associated with this vector */ + speq = phba->sli4_hba.sp_eq; + + /* Check device state for handling interrupt */ + if (unlikely(lpfc_intr_state_check(phba))) { + /* Check again for link_state with lock held */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (phba->link_state < LPFC_LINK_DOWN) + /* Flush, clear interrupt, and rearm the EQ */ + lpfc_sli4_eq_flush(phba, speq); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_NONE; + } + + /* + * Process all the events on the slow-path EQ + */ + while ((eqe = lpfc_sli4_eq_get(speq))) { + lpfc_sli4_sp_handle_eqe(phba, eqe); + if (!(++ecount % LPFC_GET_QE_REL_INT)) + lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); + } + + /* Always clear and re-arm the slow-path EQ */ + lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); + + /* Catch the no cq entry condition */ + if (unlikely(ecount == 0)) { + if (phba->intr_type == MSIX) + /* MSI-X: an interrupt arrived but produced no EQE */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0357 MSI-X interrupt with no EQE\n"); + else + /* Non MSI-X: treat as a shared interrupt */ + return IRQ_NONE; + } + + return IRQ_HANDLED; +} /* lpfc_sli4_sp_intr_handler */ + +/** + * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is directly called from the PCI layer as an interrupt + * service routine when device with SLI-4 interface spec is enabled with + * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB + * ring event in the HBA. However, when the device is enabled with either + * MSI or Pin-IRQ interrupt mode, this function is called as part of the + * device-level interrupt handler. When the PCI slot is in error recovery + * or the HBA is undergoing initialization, the interrupt handler will not + * process the interrupt. The SCSI FCP fast-path ring events are handled in + * the interrupt context.
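+ * + * Each FCP EQ owns one MSI-X vector registered with this handler and the + * per-queue lpfc_fcp_eq_hdl as its dev_id cookie. The registration done at + * init time, outside this hunk, is essentially the following sketch (the + * vector, index, and name string are illustrative placeholders): + * + * rc = request_irq(vector, lpfc_sli4_fp_intr_handler, 0, + * "lpfc:fp", &phba->sli4_hba.fcp_eq_hdl[index]);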
+ * This function is called without any lock held. It gets the hbalock to + * access and update SLI data structures. Note that the FCP EQs and FCP CQs + * are mapped one-to-one, so the FCP EQ index is equal to the FCP CQ index. + * + * This function returns IRQ_HANDLED when interrupt is handled else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli4_fp_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + struct lpfc_fcp_eq_hdl *fcp_eq_hdl; + struct lpfc_queue *fpeq; + struct lpfc_eqe *eqe; + unsigned long iflag; + int ecount = 0; + uint32_t fcp_eqidx; + + /* Get the driver's phba structure from the dev_id */ + fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; + phba = fcp_eq_hdl->phba; + fcp_eqidx = fcp_eq_hdl->idx; + + if (unlikely(!phba)) + return IRQ_NONE; + + /* Get to the EQ struct associated with this vector */ + fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; + + /* Check device state for handling interrupt */ + if (unlikely(lpfc_intr_state_check(phba))) { + /* Check again for link_state with lock held */ + spin_lock_irqsave(&phba->hbalock, iflag); + if (phba->link_state < LPFC_LINK_DOWN) + /* Flush, clear interrupt, and rearm the EQ */ + lpfc_sli4_eq_flush(phba, fpeq); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return IRQ_NONE; + } + + /* + * Process all the events on the FCP fast-path EQ + */ + while ((eqe = lpfc_sli4_eq_get(fpeq))) { + lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); + if (!(++ecount % LPFC_GET_QE_REL_INT)) + lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); + } + + /* Always clear and re-arm the fast-path EQ */ + lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); + + if (unlikely(ecount == 0)) { + if (phba->intr_type == MSIX) + /* MSI-X: an interrupt arrived but produced no EQE */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0358 MSI-X interrupt with no EQE\n"); + else + /* Non MSI-X: treat as a shared interrupt */ + return IRQ_NONE; + } + + return IRQ_HANDLED; +} /* lpfc_sli4_fp_intr_handler */ + +/** + * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is the device-level interrupt handler to device with SLI-4 + * interface spec, called from the PCI layer when either MSI or Pin-IRQ + * interrupt mode is enabled and there is an event in the HBA which requires + * driver attention. This function invokes the slow-path interrupt attention + * handling function and fast-path interrupt attention handling function in + * turn to process the relevant HBA attention events. This function is called + * without any lock held. It gets the hbalock to access and update SLI data + * structures. + * + * This function returns IRQ_HANDLED when interrupt is handled, else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli4_intr_handler(int irq, void *dev_id) +{ + struct lpfc_hba *phba; + irqreturn_t sp_irq_rc, fp_irq_rc; + bool fp_handled = false; + uint32_t fcp_eqidx; + + /* Get the driver's phba structure from the dev_id */ + phba = (struct lpfc_hba *)dev_id; + + if (unlikely(!phba)) + return IRQ_NONE; + + /* + * Invoke slow-path host attention interrupt handling as appropriate. + */ + sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); + + /* + * Invoke fast-path host attention interrupt handling as appropriate. + */ + for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { + fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, + &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); + if (fp_irq_rc == IRQ_HANDLED) + fp_handled |= true; + } + + return (fp_handled == true) ?
IRQ_HANDLED : sp_irq_rc; +} /* lpfc_sli4_intr_handler */ + +/** + * lpfc_sli4_queue_free - free a queue structure and associated memory + * @queue: The queue structure to free. + * + * This function frees a queue structure and the DMAable memory used for + * the host resident queue. This function must be called after destroying the + * queue on the HBA. + **/ +void +lpfc_sli4_queue_free(struct lpfc_queue *queue) +{ + struct lpfc_dmabuf *dmabuf; + + if (!queue) + return; + + while (!list_empty(&queue->page_list)) { + list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, + list); + dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE, + dmabuf->virt, dmabuf->phys); + kfree(dmabuf); + } + kfree(queue); + return; +} + +/** + * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure + * @phba: The HBA that this queue is being created on. + * @entry_size: The size of each queue entry for this queue. + * @entry_count: The number of entries that this queue will handle. + * + * This function allocates a queue structure and the DMAable memory used for + * the host resident queue. This function must be called before creating the + * queue on the HBA. + **/ +struct lpfc_queue * +lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, + uint32_t entry_count) +{ + struct lpfc_queue *queue; + struct lpfc_dmabuf *dmabuf; + int x, total_qe_count; + void *dma_pointer; + + + queue = kzalloc(sizeof(struct lpfc_queue) + + (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); + if (!queue) + return NULL; + queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE; + INIT_LIST_HEAD(&queue->list); + INIT_LIST_HEAD(&queue->page_list); + INIT_LIST_HEAD(&queue->child_list); + for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { + dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!dmabuf) + goto out_fail; + dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, + PAGE_SIZE, &dmabuf->phys, + GFP_KERNEL); + if (!dmabuf->virt) { + kfree(dmabuf); + goto out_fail; + } + dmabuf->buffer_tag = x; + list_add_tail(&dmabuf->list, &queue->page_list); + /* initialize queue's entry array */ + dma_pointer = dmabuf->virt; + for (; total_qe_count < entry_count && + dma_pointer < (PAGE_SIZE + dmabuf->virt); + total_qe_count++, dma_pointer += entry_size) { + queue->qe[total_qe_count].address = dma_pointer; + } + } + queue->entry_size = entry_size; + queue->entry_count = entry_count; + queue->phba = phba; + + return queue; +out_fail: + lpfc_sli4_queue_free(queue); + return NULL; +} + +/** + * lpfc_eq_create - Create an Event Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @eq: The queue structure to use to create the event queue. + * @imax: The maximum interrupt per second limit. + * + * This function creates an event queue, as detailed in @eq, on a port, + * described by @phba by sending an EQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send a mailbox command to the HBA. The @eq + * struct is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. This + * function will send the EQ_CREATE mailbox command to the HBA to setup the + * event queue, and it waits for the mailbox command to finish before + * continuing.
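+ * + * Pairing with lpfc_sli4_queue_alloc() above, a bring-up sketch (entry_size, + * entry_count and imax are placeholders, not values this patch mandates): + * + * eq = lpfc_sli4_queue_alloc(phba, entry_size, entry_count); + * if (!eq || lpfc_eq_create(phba, eq, imax)) + * goto out_error;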
+ * + * On success this function will return zero. If unable to allocate enough + * memory this function will return -ENOMEM. If the queue create mailbox + * command fails this function will return -ENXIO. + **/ +uint32_t +lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) +{ + struct lpfc_mbx_eq_create *eq_create; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + struct lpfc_dmabuf *dmabuf; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + uint16_t dmult; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_eq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_EQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + eq_create = &mbox->u.mqe.un.eq_create; + bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, + eq->page_count); + bf_set(lpfc_eq_context_size, &eq_create->u.request.context, + LPFC_EQE_SIZE); + bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); + /* Calculate delay multiplier from maximum interrupt per second */ + dmult = LPFC_DMULT_CONST/imax - 1; + bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, + dmult); + switch (eq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0360 Unsupported EQ count. (%d)\n", + eq->entry_count); + if (eq->entry_count < 256) + return -EINVAL; + /* otherwise default to smallest count (fall through) */ + case 256: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_256); + break; + case 512: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_512); + break; + case 1024: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_1024); + break; + case 2048: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_2048); + break; + case 4096: + bf_set(lpfc_eq_context_count, &eq_create->u.request.context, + LPFC_EQ_CNT_4096); + break; + } + list_for_each_entry(dmabuf, &eq->page_list, list) { + eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + mbox->vport = phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->context1 = NULL; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2500 EQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + eq->type = LPFC_EQ; + eq->subtype = LPFC_NONE; + eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); + if (eq->queue_id == 0xFFFF) + status = -ENXIO; + eq->host_index = 0; + eq->hba_index = 0; + + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_cq_create - Create a Completion Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @cq: The queue structure to use to create the completion queue. + * @eq: The event queue to bind this completion queue to. + * + * This function creates a completion queue, as detailed in @cq, on a port, + * described by @phba by sending a CQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send a mailbox command to the HBA.
The @cq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. The @eq + * is used to indicate which event queue to bind this completion queue to. This + * function will send the CQ_CREATE mailbox command to the HBA to setup the + * completion queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return ENOMEM. If the queue create mailbox command + * fails this function will return ENXIO. + **/ +uint32_t +lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, + struct lpfc_queue *eq, uint32_t type, uint32_t subtype) +{ + struct lpfc_mbx_cq_create *cq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_cq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_CQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + cq_create = &mbox->u.mqe.un.cq_create; + bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, + cq->page_count); + bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); + bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); + bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); + switch (cq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0361 Unsupported CQ count. (%d)\n", + cq->entry_count); + if (cq->entry_count < 256) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 256: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_256); + break; + case 512: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_512); + break; + case 1024: + bf_set(lpfc_cq_context_count, &cq_create->u.request.context, + LPFC_CQ_CNT_1024); + break; + } + list_for_each_entry(dmabuf, &cq->page_list, list) { + cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + + /* The IOCTL status is embedded in the mailbox subheader. 
*/ + shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2501 CQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); + if (cq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + /* link the cq onto the parent eq child list */ + list_add_tail(&cq->list, &eq->child_list); + /* Set up completion queue's type and subtype */ + cq->type = type; + cq->subtype = subtype; + cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); + cq->host_index = 0; + cq->hba_index = 0; +out: + + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_mq_create - Create a mailbox Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @mq: The queue structure to use to create the mailbox queue. + * + * This function creates a mailbox queue, as detailed in @mq, on a port, + * described by @phba by sending a MQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @cq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. This + * function will send the MQ_CREATE mailbox command to the HBA to setup the + * mailbox queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return ENOMEM. If the queue create mailbox command + * fails this function will return ENXIO. + **/ +uint32_t +lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, + struct lpfc_queue *cq, uint32_t subtype) +{ + struct lpfc_mbx_mq_create *mq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_mq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + mq_create = &mbox->u.mqe.un.mq_create; + bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, + mq->page_count); + bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); + switch (mq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0362 Unsupported MQ count. 
(%d)\n", + mq->entry_count); + if (mq->entry_count < 16) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 16: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_16); + break; + case 32: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_32); + break; + case 64: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_64); + break; + case 128: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_128); + break; + } + list_for_each_entry(dmabuf, &mq->page_list, list) { + mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2502 MQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response); + if (mq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + mq->type = LPFC_MQ; + mq->subtype = subtype; + mq->host_index = 0; + mq->hba_index = 0; + + /* link the mq onto the parent cq child list */ + list_add_tail(&mq->list, &cq->child_list); +out: + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_wq_create - Create a Work Queue on the HBA + * @phba: HBA structure that indicates port to create a queue on. + * @wq: The queue structure to use to create the work queue. + * @cq: The completion queue to bind this work queue to. + * @subtype: The subtype of the work queue indicating its functionality. + * + * This function creates a work queue, as detailed in @wq, on a port, described + * by @phba by sending a WQ_CREATE mailbox command to the HBA. + * + * The @phba struct is used to send mailbox command to HBA. The @wq struct + * is used to get the entry count and entry size that are necessary to + * determine the number of pages to allocate and use for this queue. The @cq + * is used to indicate which completion queue to bind this work queue to. This + * function will send the WQ_CREATE mailbox command to the HBA to setup the + * work queue. This function is asynchronous and will wait for the mailbox + * command to finish before continuing. + * + * On success this function will return a zero. If unable to allocate enough + * memory this function will return ENOMEM. If the queue create mailbox command + * fails this function will return ENXIO. 
+ **/
+uint32_t
+lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+	       struct lpfc_queue *cq, uint32_t subtype)
+{
+	struct lpfc_mbx_wq_create *wq_create;
+	struct lpfc_dmabuf *dmabuf;
+	LPFC_MBOXQ_t *mbox;
+	int rc, length, status = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+	length = (sizeof(struct lpfc_mbx_wq_create) -
+		  sizeof(struct lpfc_sli4_cfg_mhdr));
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
+			 length, LPFC_SLI4_MBX_EMBED);
+	wq_create = &mbox->u.mqe.un.wq_create;
+	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
+	       wq->page_count);
+	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
+	       cq->queue_id);
+	list_for_each_entry(dmabuf, &wq->page_list, list) {
+		wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+					putPaddrLow(dmabuf->phys);
+		wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+					putPaddrHigh(dmabuf->phys);
+	}
+	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2503 WQ_CREATE mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		status = -ENXIO;
+		goto out;
+	}
+	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
+	if (wq->queue_id == 0xFFFF) {
+		status = -ENXIO;
+		goto out;
+	}
+	wq->type = LPFC_WQ;
+	wq->subtype = subtype;
+	wq->host_index = 0;
+	wq->hba_index = 0;
+
+	/* link the wq onto the parent cq child list */
+	list_add_tail(&wq->list, &cq->child_list);
+out:
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	return status;
+}
+
+/**
+ * lpfc_rq_create - Create a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrq: The queue structure to use to create the header receive queue.
+ * @drq: The queue structure to use to create the data receive queue.
+ * @cq: The completion queue to bind this work queue to.
+ *
+ * This function creates a receive buffer queue pair, as detailed in @hrq and
+ * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
+ * structs are used to get the entry count that is necessary to determine the
+ * number of pages to use for this queue. The @cq is used to indicate which
+ * completion queue to bind received buffers that are posted to these queues to.
+ * This function will send the RQ_CREATE mailbox command to the HBA to setup the
+ * receive queue pair. This function is synchronous and will wait for the
+ * mailbox command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return ENOMEM. If the queue create mailbox command
+ * fails this function will return ENXIO.
+ **/ +uint32_t +lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) +{ + struct lpfc_mbx_rq_create *rq_create; + struct lpfc_dmabuf *dmabuf; + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (hrq->entry_count != drq->entry_count) + return -EINVAL; + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_rq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + rq_create = &mbox->u.mqe.un.rq_create; + switch (hrq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2535 Unsupported RQ count. (%d)\n", + hrq->entry_count); + if (hrq->entry_count < 512) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 512: + bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, + hrq->page_count); + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, + LPFC_HDR_BUF_SIZE); + list_for_each_entry(dmabuf, &hrq->page_list, list) { + rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2504 RQ_CREATE mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + goto out; + } + hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); + if (hrq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + hrq->type = LPFC_HRQ; + hrq->subtype = subtype; + hrq->host_index = 0; + hrq->hba_index = 0; + + /* now create the data queue */ + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + switch (drq->entry_count) { + default: + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2536 Unsupported RQ count. 
(%d)\n", + drq->entry_count); + if (drq->entry_count < 512) + return -EINVAL; + /* otherwise default to smallest count (drop through) */ + case 512: + bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_512); + break; + case 1024: + bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_1024); + break; + case 2048: + bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_2048); + break; + case 4096: + bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, + LPFC_RQ_RING_SIZE_4096); + break; + } + bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, + drq->page_count); + bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, + LPFC_DATA_BUF_SIZE); + list_for_each_entry(dmabuf, &drq->page_list, list) { + rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + status = -ENXIO; + goto out; + } + drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); + if (drq->queue_id == 0xFFFF) { + status = -ENXIO; + goto out; + } + drq->type = LPFC_DRQ; + drq->subtype = subtype; + drq->host_index = 0; + drq->hba_index = 0; + + /* link the header and data RQs onto the parent cq child list */ + list_add_tail(&hrq->list, &cq->child_list); + list_add_tail(&drq->list, &cq->child_list); + +out: + if (rc != MBX_TIMEOUT) + mempool_free(mbox, phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_eq_destroy - Destroy an event Queue on the HBA + * @eq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @eq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @eq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. + **/ +uint32_t +lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!eq) + return -ENODEV; + mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_eq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_EQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, + eq->queue_id); + mbox->vport = eq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + + rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. 
*/ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2505 EQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + + /* Remove eq from any list */ + list_del_init(&eq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, eq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_cq_destroy - Destroy a Completion Queue on the HBA + * @cq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @cq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @cq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. + **/ +uint32_t +lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!cq) + return -ENODEV; + mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_cq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_CQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, + cq->queue_id); + mbox->vport = cq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.wq_create.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2506 CQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove cq from any list */ + list_del_init(&cq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, cq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA + * @qm: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @mq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @mq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. 
+ **/ +uint32_t +lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!mq) + return -ENODEV; + mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_mq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, + mq->queue_id); + mbox->vport = mq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2507 MQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove mq from any list */ + list_del_init(&mq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, mq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_wq_destroy - Destroy a Work Queue on the HBA + * @wq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @wq by sending an mailbox + * command, specific to the type of queue, to the HBA. + * + * The @wq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. + **/ +uint32_t +lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!wq) + return -ENODEV; + mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_wq_destroy) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, + wq->queue_id); + mbox->vport = wq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2508 WQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + /* Remove wq from any list */ + list_del_init(&wq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, wq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_rq_destroy - Destroy a Receive Queue on the HBA + * @rq: The queue structure associated with the queue to destroy. + * + * This function destroys a queue, as detailed in @rq by sending an mailbox + * command, specific to the type of queue, to the HBA. 
+ * + * The @rq struct is used to get the queue ID of the queue to destroy. + * + * On success this function will return a zero. If the queue destroy mailbox + * command fails this function will return ENXIO. + **/ +uint32_t +lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, + struct lpfc_queue *drq) +{ + LPFC_MBOXQ_t *mbox; + int rc, length, status = 0; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + if (!hrq || !drq) + return -ENODEV; + mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) + return -ENOMEM; + length = (sizeof(struct lpfc_mbx_rq_destroy) - + sizeof(struct mbox_header)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, + length, LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, + hrq->queue_id); + mbox->vport = hrq->phba->pport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2509 RQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, hrq->phba->mbox_mem_pool); + return -ENXIO; + } + bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, + drq->queue_id); + rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) + &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2510 RQ_DESTROY mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + status = -ENXIO; + } + list_del_init(&hrq->list); + list_del_init(&drq->list); + if (rc != MBX_TIMEOUT) + mempool_free(mbox, hrq->phba->mbox_mem_pool); + return status; +} + +/** + * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA + * @phba: The virtual port for which this call being executed. + * @pdma_phys_addr0: Physical address of the 1st SGL page. + * @pdma_phys_addr1: Physical address of the 2nd SGL page. + * @xritag: the xritag that ties this io to the SGL pages. + * + * This routine will post the sgl pages for the IO that has the xritag + * that is in the iocbq structure. The xritag is assigned during iocbq + * creation and persists for as long as the driver is loaded. + * if the caller has fewer than 256 scatter gather segments to map then + * pdma_phys_addr1 should be 0. + * If the caller needs to map more than 256 scatter gather segment then + * pdma_phys_addr1 should be a valid physical address. + * physical address for SGLs must be 64 byte aligned. + * If you are going to map 2 SGL's then the first one must have 256 entries + * the second sgl can have between 1 and 256 entries. 
+ *
+ * Return codes:
+ *	0 - Success
+ *	-ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+		   dma_addr_t pdma_phys_addr0,
+		   dma_addr_t pdma_phys_addr1,
+		   uint16_t xritag)
+{
+	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	if (xritag == NO_XRI) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"0364 Invalid param:\n");
+		return -EINVAL;
+	}
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+			 sizeof(struct lpfc_mbx_post_sgl_pages) -
+			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+
+	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
+				&mbox->u.mqe.un.post_sgl_pages;
+	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
+	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
+				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
+
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
+				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
+	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2511 POST_SGL mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
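Each sgl_pg_pairs entry filled in above carries a 64-bit bus address as two 32-bit little-endian words. A small sketch of that split, with putPaddrLow()/putPaddrHigh() modeled as plain shifts and masks (the struct and function names here are illustrative, not the driver's):

    /* Sketch of the address split used when filling an SGL page pair:
     * one 64-bit DMA address becomes a lo word and a hi word, and the
     * driver then applies cpu_to_le32() to each before handing the
     * request to the port. */
    #include <stdint.h>
    #include <stdio.h>

    struct sgl_pair_words {
            uint32_t addr_lo;
            uint32_t addr_hi;
    };

    static struct sgl_pair_words split_paddr(uint64_t phys)
    {
            struct sgl_pair_words w;

            w.addr_lo = (uint32_t)(phys & 0xffffffffu);  /* putPaddrLow()  */
            w.addr_hi = (uint32_t)(phys >> 32);          /* putPaddrHigh() */
            return w;
    }

    int main(void)
    {
            struct sgl_pair_words w = split_paddr(0x0000000123456000ULL);

            printf("lo=0x%08x hi=0x%08x\n", (unsigned)w.addr_lo,
                   (unsigned)w.addr_hi);
            return 0;
    }

The second page of a pair is optional: when fewer than 256 scatter gather entries are needed, the pg1 address is simply posted as zero, as the routine above does.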
+/**
+ * lpfc_sli4_remove_all_sgl_pages - Remove all SGL pages registered with the HBA
+ * @phba: The HBA for which this call is being executed.
+ *
+ * This routine will remove all of the sgl pages registered with the hba.
+ *
+ * Return codes:
+ *	0 - Success
+ *	-ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox)
+		return -ENOMEM;
+
+	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+			 LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
+			 LPFC_SLI4_MBX_EMBED);
+	if (!phba->sli4_hba.intr_enable)
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+	else
+		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+	/* The IOCTL status is embedded in the mailbox subheader. */
+	shdr = (union lpfc_sli4_cfg_shdr *)
+		&mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+	if (rc != MBX_TIMEOUT)
+		mempool_free(mbox, phba->mbox_mem_pool);
+	if (shdr_status || shdr_add_status || rc) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
+				"status x%x add_status x%x, mbx status x%x\n",
+				shdr_status, shdr_add_status, rc);
+		rc = -ENXIO;
+	}
+	return rc;
+}
+
+/**
+ * lpfc_sli4_next_xritag - Get an xritag for the io
+ * @phba: Pointer to HBA context object.
+ *
+ * This function gets an xritag for the iocb. If there is no unused xritag
+ * it will return 0xFFFF, which is not a valid xritag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli4_next_xritag(struct lpfc_hba *phba)
+{
+	uint16_t xritag;
+
+	spin_lock_irq(&phba->hbalock);
+	xritag = phba->sli4_hba.next_xri;
+	if ((xritag != (uint16_t) -1) && xritag <
+		(phba->sli4_hba.max_cfg_param.max_xri
+			+ phba->sli4_hba.max_cfg_param.xri_base)) {
+		phba->sli4_hba.next_xri++;
+		phba->sli4_hba.max_cfg_param.xri_used++;
+		spin_unlock_irq(&phba->hbalock);
+		return xritag;
+	}
+	spin_unlock_irq(&phba->hbalock);
+
+	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"2004 Failed to allocate XRI.last XRITAG is %d"
+			" Max XRI is %d, Used XRI is %d\n",
+			phba->sli4_hba.next_xri,
+			phba->sli4_hba.max_cfg_param.max_xri,
+			phba->sli4_hba.max_cfg_param.xri_used);
+	return -1;
+}
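The tag check above only hands out values inside the window [xri_base, xri_base + max_xri), since the valid XRI range reported by the port need not start at zero. A simplified sketch of that window test (the struct and field names are illustrative, and the driver's locking and usage counters are omitted):

    /* Sketch of the XRI window check in lpfc_sli4_next_xritag: a cursor
     * walks a range that does not have to begin at zero; allocation
     * fails once the cursor leaves [base, base + max). */
    #include <stdint.h>
    #include <stdio.h>

    struct xri_pool {
            uint16_t next;  /* next candidate tag */
            uint16_t base;  /* first valid tag (xri_base) */
            uint16_t max;   /* number of tags (max_xri) */
    };

    static int xri_alloc(struct xri_pool *p)
    {
            if (p->next >= p->base && p->next < (uint16_t)(p->base + p->max))
                    return p->next++;       /* hand out the tag, advance */
            return -1;                      /* pool exhausted */
    }

    int main(void)
    {
            struct xri_pool pool = { .next = 100, .base = 100, .max = 2 };

            /* prints "100 101 -1": two valid tags, then exhaustion */
            printf("%d %d %d\n", xri_alloc(&pool), xri_alloc(&pool),
                   xri_alloc(&pool));
            return 0;
    }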
+
+/**
+ * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of driver's sgl pages to the
+ * HBA using non-embedded mailbox command. No Lock is held. This routine
+ * is only called when the driver is loading and after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry;
+	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+	struct sgl_page_pairs *sgl_pg_pairs;
+	void *viraddr;
+	LPFC_MBOXQ_t *mbox;
+	uint32_t reqlen, alloclen, pg_pairs;
+	uint32_t mbox_tmo;
+	uint16_t xritag_start = 0;
+	int els_xri_cnt, rc = 0;
+	uint32_t shdr_status, shdr_add_status;
+	union lpfc_sli4_cfg_shdr *shdr;
+
+	/* The number of sgls to be posted */
+	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+	if (reqlen > PAGE_SIZE) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+				"2559 Block sgl registration required DMA "
+				"size (%d) great than a page\n", reqlen);
+		return -ENOMEM;
+	}
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mbox) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2560 Failed to allocate mbox cmd memory\n");
+		return -ENOMEM;
+	}
+
+	/* Allocate DMA memory and set up the non-embedded mailbox command */
+	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+				    reqlen, LPFC_SLI4_MBX_NEMBED);
+
+	if (alloclen < reqlen) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0285 Allocated DMA memory size (%d) is "
+				"less than the requested DMA memory "
+				"size (%d)\n", alloclen, reqlen);
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+
+	/* Get the first SGE entry from the non-embedded DMA memory */
+	if (unlikely(!mbox->sge_array)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+				"2525 Failed to get the non-embedded SGE "
+				"virtual address\n");
+		lpfc_sli4_mbox_cmd_free(phba, mbox);
+		return -ENOMEM;
+	}
+	viraddr = mbox->sge_array->addr[0];
+
+	/* Set up the SGL pages in the non-embedded DMA pages */
+	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+	sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
+		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+		/* Set up the sge entry */
+		sgl_pg_pairs->sgl_pg0_addr_lo =
+				cpu_to_le32(putPaddrLow(sglq_entry->phys));
+		sgl_pg_pairs->sgl_pg0_addr_hi =
+				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+		sgl_pg_pairs->sgl_pg1_addr_lo =
+				cpu_to_le32(putPaddrLow(0));
+		sgl_pg_pairs->sgl_pg1_addr_hi =
+				cpu_to_le32(putPaddrHigh(0));
+		/* Keep the first xritag on the list */
+		if (pg_pairs == 0)
+			xritag_start = sglq_entry->sli4_xritag;
+		sgl_pg_pairs++;
+	}
+	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+	pg_pairs = (pg_pairs > 0) ?
(pg_pairs - 1) : pg_pairs; + bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); + /* Perform endian conversion if necessary */ + sgl->word0 = cpu_to_le32(sgl->word0); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2513 POST_SGL_BLOCK mailbox command failed " + "status x%x add_status x%x mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware + * @phba: pointer to lpfc hba data structure. + * @sblist: pointer to scsi buffer list. + * @count: number of scsi buffers on the list. + * + * This routine is invoked to post a block of @count scsi sgl pages from a + * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. + * No Lock is held. + * + **/ +int +lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, + int cnt) +{ + struct lpfc_scsi_buf *psb; + struct lpfc_mbx_post_uembed_sgl_page1 *sgl; + struct sgl_page_pairs *sgl_pg_pairs; + void *viraddr; + LPFC_MBOXQ_t *mbox; + uint32_t reqlen, alloclen, pg_pairs; + uint32_t mbox_tmo; + uint16_t xritag_start = 0; + int rc = 0; + uint32_t shdr_status, shdr_add_status; + dma_addr_t pdma_phys_bpl1; + union lpfc_sli4_cfg_shdr *shdr; + + /* Calculate the requested length of the dma memory */ + reqlen = cnt * sizeof(struct sgl_page_pairs) + + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); + if (reqlen > PAGE_SIZE) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0217 Block sgl registration required DMA " + "size (%d) great than a page\n", reqlen); + return -ENOMEM; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0283 Failed to allocate mbox cmd memory\n"); + return -ENOMEM; + } + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, + LPFC_SLI4_MBX_NEMBED); + + if (alloclen < reqlen) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2561 Allocated DMA memory size (%d) is " + "less than the requested DMA memory " + "size (%d)\n", alloclen, reqlen); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory */ + if (unlikely(!mbox->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2565 Failed to get the non-embedded SGE " + "virtual address\n"); + lpfc_sli4_mbox_cmd_free(phba, mbox); + return -ENOMEM; + } + viraddr = mbox->sge_array->addr[0]; + + /* Set up the SGL pages in the non-embedded DMA pages */ + sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; + sgl_pg_pairs = &sgl->sgl_pg_pairs; + + pg_pairs = 0; + list_for_each_entry(psb, sblist, list) { + /* Set up the sge entry */ + sgl_pg_pairs->sgl_pg0_addr_lo = + cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); + sgl_pg_pairs->sgl_pg0_addr_hi = + cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); + if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + pdma_phys_bpl1 = 
psb->dma_phys_bpl + SGL_PAGE_SIZE; + else + pdma_phys_bpl1 = 0; + sgl_pg_pairs->sgl_pg1_addr_lo = + cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); + sgl_pg_pairs->sgl_pg1_addr_hi = + cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); + /* Keep the first xritag on the list */ + if (pg_pairs == 0) + xritag_start = psb->cur_iocbq.sli4_xritag; + sgl_pg_pairs++; + pg_pairs++; + } + bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); + bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); + /* Perform endian conversion if necessary */ + sgl->word0 = cpu_to_le32(sgl->word0); + + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); + } + shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + lpfc_sli4_mbox_cmd_free(phba, mbox); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2564 POST_SGL_BLOCK mailbox command failed " + "status x%x add_status x%x mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_fc_frame_check - Check that this frame is a valid frame to handle + * @phba: pointer to lpfc_hba struct that the frame was received on + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * + * This function checks the fields in the @fc_hdr to see if the FC frame is a + * valid type of frame that the LPFC driver will handle. This function will + * return a zero if the frame is a valid frame or a non zero value when the + * frame does not pass the check. + **/ +static int +lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) +{ + char *rctl_names[] = FC_RCTL_NAMES_INIT; + char *type_names[] = FC_TYPE_NAMES_INIT; + struct fc_vft_header *fc_vft_hdr; + + switch (fc_hdr->fh_r_ctl) { + case FC_RCTL_DD_UNCAT: /* uncategorized information */ + case FC_RCTL_DD_SOL_DATA: /* solicited data */ + case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ + case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ + case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ + case FC_RCTL_DD_DATA_DESC: /* data descriptor */ + case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ + case FC_RCTL_DD_CMD_STATUS: /* command status */ + case FC_RCTL_ELS_REQ: /* extended link services request */ + case FC_RCTL_ELS_REP: /* extended link services reply */ + case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ + case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ + case FC_RCTL_BA_NOP: /* basic link service NOP */ + case FC_RCTL_BA_ABTS: /* basic link service abort */ + case FC_RCTL_BA_RMC: /* remove connection */ + case FC_RCTL_BA_ACC: /* basic accept */ + case FC_RCTL_BA_RJT: /* basic reject */ + case FC_RCTL_BA_PRMT: + case FC_RCTL_ACK_1: /* acknowledge_1 */ + case FC_RCTL_ACK_0: /* acknowledge_0 */ + case FC_RCTL_P_RJT: /* port reject */ + case FC_RCTL_F_RJT: /* fabric reject */ + case FC_RCTL_P_BSY: /* port busy */ + case FC_RCTL_F_BSY: /* fabric busy to data frame */ + case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ + case FC_RCTL_LCR: /* link credit reset */ + case FC_RCTL_END: /* end */ + break; + case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ + fc_vft_hdr = (struct fc_vft_header *)fc_hdr; + fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; + return lpfc_fc_frame_check(phba, fc_hdr); + default: + goto 
drop; + } + switch (fc_hdr->fh_type) { + case FC_TYPE_BLS: + case FC_TYPE_ELS: + case FC_TYPE_FCP: + case FC_TYPE_CT: + break; + case FC_TYPE_IP: + case FC_TYPE_ILS: + default: + goto drop; + } + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "2538 Received frame rctl:%s type:%s\n", + rctl_names[fc_hdr->fh_r_ctl], + type_names[fc_hdr->fh_type]); + return 0; +drop: + lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, + "2539 Dropped frame rctl:%s type:%s\n", + rctl_names[fc_hdr->fh_r_ctl], + type_names[fc_hdr->fh_type]); + return 1; +} + +/** + * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * + * This function processes the FC header to retrieve the VFI from the VF + * header, if one exists. This function will return the VFI if one exists + * or 0 if no VSAN Header exists. + **/ +static uint32_t +lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) +{ + struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; + + if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) + return 0; + return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); +} + +/** + * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to + * @phba: Pointer to the HBA structure to search for the vport on + * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) + * @fcfi: The FC Fabric ID that the frame came from + * + * This function searches the @phba for a vport that matches the content of the + * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the + * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function + * returns the matching vport pointer or NULL if unable to match frame to a + * vport. + **/ +static struct lpfc_vport * +lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, + uint16_t fcfi) +{ + struct lpfc_vport **vports; + struct lpfc_vport *vport = NULL; + int i; + uint32_t did = (fc_hdr->fh_d_id[0] << 16 | + fc_hdr->fh_d_id[1] << 8 | + fc_hdr->fh_d_id[2]); + + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { + if (phba->fcf.fcfi == fcfi && + vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && + vports[i]->fc_myDID == did) { + vport = vports[i]; + break; + } + } + lpfc_destroy_vport_work_array(phba, vports); + return vport; +} + +/** + * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences + * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame + * + * This function searches through the existing incomplete sequences that have + * been sent to this @vport. If the frame matches one of the incomplete + * sequences then the dbuf in the @dmabuf is added to the list of frames that + * make up that sequence. If no sequence is found that matches this frame then + * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list + * This function returns a pointer to the first dmabuf in the sequence list that + * the frame was linked to. 
+ **/
+static struct hbq_dmabuf *
+lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *new_hdr;
+	struct fc_frame_header *temp_hdr;
+	struct lpfc_dmabuf *d_buf;
+	struct lpfc_dmabuf *h_buf;
+	struct hbq_dmabuf *seq_dmabuf = NULL;
+	struct hbq_dmabuf *temp_dmabuf = NULL;
+
+	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* Use the hdr_buf to find the sequence that this frame belongs to */
+	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+		temp_hdr = (struct fc_frame_header *)h_buf->virt;
+		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+			continue;
+		/* found a pending sequence that matches this frame */
+		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+		break;
+	}
+	if (!seq_dmabuf) {
+		/*
+		 * This indicates first frame received for this sequence.
+		 * Queue the buffer on the vport's rcv_buffer_list.
+		 */
+		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+		return dmabuf;
+	}
+	temp_hdr = seq_dmabuf->hbuf.virt;
+	if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
+		list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+		return dmabuf;
+	}
+	/* find the correct place in the sequence to insert this frame */
+	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
+		/*
+		 * If the frame's sequence count is greater than the frame on
+		 * the list then insert the frame right after this frame
+		 */
+		if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
+			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
+			return seq_dmabuf;
+		}
+	}
+	return NULL;
+}
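Once frames are linked in SEQ_CNT order, deciding whether the sequence is complete hinges on the F_CTL field of each header: three big-endian bytes are folded into a 24-bit word and the End_Sequence bit is tested, as the routine below does. A standalone version of that check (FC_FC_END_SEQ is bit 21 of F_CTL, per include/scsi/fc/fc_fs.h; the sample byte values are illustrative):

    /* Sketch of the F_CTL handling in lpfc_seq_complete(): fold the
     * three fh_f_ctl bytes into one word, then test End_Sequence. */
    #include <stdint.h>
    #include <stdio.h>

    #define FC_FC_END_SEQ 0x200000u   /* bit 21 of F_CTL */

    static int frame_ends_sequence(const uint8_t fh_f_ctl[3])
    {
            uint32_t fctl = fh_f_ctl[0] << 16 |
                            fh_f_ctl[1] << 8 |
                            fh_f_ctl[2];

            return (fctl & FC_FC_END_SEQ) != 0;
    }

    int main(void)
    {
            uint8_t last[3] = { 0x29, 0x00, 0x00 };  /* END_SEQ set */
            uint8_t mid[3]  = { 0x09, 0x00, 0x00 };  /* END_SEQ clear */

            printf("%d %d\n", frame_ends_sequence(last),
                   frame_ends_sequence(mid));        /* prints "1 0" */
            return 0;
    }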
+
+/**
+ * lpfc_seq_complete - Indicates if a sequence is complete
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function checks the sequence, starting with the frame described by
+ * @dmabuf, to see if all the frames associated with this sequence are present.
+ * The frames associated with this sequence are linked to the @dmabuf using the
+ * dbuf list. This function looks for three things. 1) That the first frame
+ * has a sequence count of zero. 2) That there is a frame with the last frame
+ * of sequence bit set. 3) That there are no holes in the sequence count. The
+ * function will return 1 when the sequence is complete, otherwise it will
+ * return 0.
+ **/
+static int
+lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
+{
+	struct fc_frame_header *hdr;
+	struct lpfc_dmabuf *d_buf;
+	struct hbq_dmabuf *seq_dmabuf;
+	uint32_t fctl;
+	int seq_count = 0;
+
+	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+	/* make sure the first frame of the sequence has a count of zero */
+	if (hdr->fh_seq_cnt != seq_count)
+		return 0;
+	fctl = (hdr->fh_f_ctl[0] << 16 |
+		hdr->fh_f_ctl[1] << 8 |
+		hdr->fh_f_ctl[2]);
+	/* If last frame of sequence we can return success. */
+	if (fctl & FC_FC_END_SEQ)
+		return 1;
+	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
+		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+		/* If there is a hole in the sequence count then fail. */
+		if (++seq_count != hdr->fh_seq_cnt)
+			return 0;
+		fctl = (hdr->fh_f_ctl[0] << 16 |
+			hdr->fh_f_ctl[1] << 8 |
+			hdr->fh_f_ctl[2]);
+		/* If last frame of sequence we can return success. */
+		if (fctl & FC_FC_END_SEQ)
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * lpfc_prep_seq - Prep sequence for ULP processing
+ * @vport: Pointer to the vport on which this sequence was received
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function takes a sequence, described by a list of frames, and creates
+ * a list of iocbq structures to describe the sequence. This iocbq list will be
+ * used to issue to the generic unsolicited sequence handler. This routine
+ * returns a pointer to the first iocbq in the list. If the function is unable
+ * to allocate an iocbq then it throws out the received frames that were not
+ * able to be described and returns a pointer to the first iocbq. If unable to
+ * allocate any iocbqs (including the first) this function will return NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+{
+	struct lpfc_dmabuf *d_buf, *n_buf;
+	struct lpfc_iocbq *first_iocbq, *iocbq;
+	struct fc_frame_header *fc_hdr;
+	uint32_t sid;
+
+	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+	/* remove from receive buffer list */
+	list_del_init(&seq_dmabuf->hbuf.list);
+	/* get the Remote Port's SID */
+	sid = (fc_hdr->fh_s_id[0] << 16 |
+	       fc_hdr->fh_s_id[1] << 8 |
+	       fc_hdr->fh_s_id[2]);
+	/* Get an iocbq struct to fill in. */
+	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
+	if (first_iocbq) {
+		/* Initialize the first IOCB. */
+		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+		first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
+		first_iocbq->iocb.unsli3.rcvsli3.vpi =
+					vport->vpi + vport->phba->vpi_base;
+		/* put the first buffer into the first IOCBq */
+		first_iocbq->context2 = &seq_dmabuf->dbuf;
+		first_iocbq->context3 = NULL;
+		first_iocbq->iocb.ulpBdeCount = 1;
+		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+					LPFC_DATA_BUF_SIZE;
+		first_iocbq->iocb.un.rcvels.remoteID = sid;
+	}
+	iocbq = first_iocbq;
+	/*
+	 * Each IOCBq can have two Buffers assigned, so go through the list
+	 * of buffers for this sequence and save two buffers in each IOCBq
+	 */
+	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
+		if (!iocbq) {
+			lpfc_in_buf_free(vport->phba, d_buf);
+			continue;
+		}
+		if (!iocbq->context3) {
+			iocbq->context3 = d_buf;
+			iocbq->iocb.ulpBdeCount++;
+			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
+					LPFC_DATA_BUF_SIZE;
+		} else {
+			iocbq = lpfc_sli_get_iocbq(vport->phba);
+			if (!iocbq) {
+				if (first_iocbq) {
+					first_iocbq->iocb.ulpStatus =
+							IOSTAT_FCP_RSP_ERROR;
+					first_iocbq->iocb.un.ulpWord[4] =
+							IOERR_NO_RESOURCES;
+				}
+				lpfc_in_buf_free(vport->phba, d_buf);
+				continue;
+			}
+			iocbq->context2 = d_buf;
+			iocbq->context3 = NULL;
+			iocbq->iocb.ulpBdeCount = 1;
+			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+					LPFC_DATA_BUF_SIZE;
+			iocbq->iocb.un.rcvels.remoteID = sid;
+			list_add_tail(&iocbq->list, &first_iocbq->list);
+		}
+	}
+	return first_iocbq;
+}
+
+/**
+ * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. This function processes all
+ * the received buffers and gives them to upper layers when a received buffer
+ * indicates that it is the final frame in the sequence. The interrupt
+ * service routine processes received buffers at interrupt contexts and adds
+ * received dma buffers to the rb_pend_list queue and signals the worker thread.
+ * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the + * appropriate receive function when the final frame in a sequence is received. + **/ +int +lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) +{ + LIST_HEAD(cmplq); + struct hbq_dmabuf *dmabuf, *seq_dmabuf; + struct fc_frame_header *fc_hdr; + struct lpfc_vport *vport; + uint32_t fcfi; + struct lpfc_iocbq *iocbq; + + /* Clear hba flag and get all received buffers into the cmplq */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~HBA_RECEIVE_BUFFER; + list_splice_init(&phba->rb_pend_list, &cmplq); + spin_unlock_irq(&phba->hbalock); + + /* Process each received buffer */ + while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { + fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; + /* check to see if this a valid type of frame */ + if (lpfc_fc_frame_check(phba, fc_hdr)) { + lpfc_in_buf_free(phba, &dmabuf->dbuf); + continue; + } + fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); + vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); + if (!vport) { + /* throw out the frame */ + lpfc_in_buf_free(phba, &dmabuf->dbuf); + continue; + } + /* Link this frame */ + seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); + if (!seq_dmabuf) { + /* unable to add frame to vport - throw it out */ + lpfc_in_buf_free(phba, &dmabuf->dbuf); + continue; + } + /* If not last frame in sequence continue processing frames. */ + if (!lpfc_seq_complete(seq_dmabuf)) { + /* + * When saving off frames post a new one and mark this + * frame to be freed when it is finished. + **/ + lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); + dmabuf->tag = -1; + continue; + } + fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; + iocbq = lpfc_prep_seq(vport, seq_dmabuf); + if (!lpfc_complete_unsol_iocb(phba, + &phba->sli.ring[LPFC_ELS_RING], + iocbq, fc_hdr->fh_r_ctl, + fc_hdr->fh_type)) + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2540 Ring %d handler: unexpected Rctl " + "x%x Type x%x received\n", + LPFC_ELS_RING, + fc_hdr->fh_r_ctl, fc_hdr->fh_type); + }; + return 0; +} + +/** + * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a PAGE_SIZE memory region to the port to hold up to + * PAGE_SIZE modulo 64 rpi context headers. + * + * This routine does not require any locks. It's usage is expected + * to be driver load or reset recovery when the driver is + * sequential. + * + * Return codes + * 0 - sucessful + * EIO - The mailbox failed to complete successfully. + * When this error occurs, the driver is not guaranteed + * to have any rpi regions posted to the device and + * must either attempt to repost the regions or take a + * fatal error. + **/ +int +lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) +{ + struct lpfc_rpi_hdr *rpi_page; + uint32_t rc = 0; + + /* Post all rpi memory regions to the port. */ + list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { + rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2008 Error %d posting all rpi " + "headers\n", rc); + rc = -EIO; + break; + } + } + + return rc; +} + +/** + * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port + * @phba: pointer to lpfc hba data structure. + * @rpi_page: pointer to the rpi memory region. 
+ * + * This routine is invoked to post a single rpi header to the + * HBA consistent with the SLI-4 interface spec. This memory region + * maps up to 64 rpi context regions. + * + * Return codes + * 0 - sucessful + * ENOMEM - No available memory + * EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; + uint32_t rc = 0; + uint32_t mbox_tmo; + uint32_t shdr_status, shdr_add_status; + union lpfc_sli4_cfg_shdr *shdr; + + /* The port is notified of the header region via a mailbox command. */ + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2001 Unable to allocate memory for issuing " + "SLI_CONFIG_SPECIAL mailbox command\n"); + return -ENOMEM; + } + + /* Post all rpi memory regions to the port. */ + hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, + sizeof(struct lpfc_mbx_post_hdr_tmpl) - + sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); + bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, + hdr_tmpl, rpi_page->page_count); + bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, + rpi_page->start_rpi); + hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); + hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2514 POST_RPI_HDR mailbox failed with " + "status x%x add_status x%x, mbx status x%x\n", + shdr_status, shdr_add_status, rc); + rc = -ENXIO; + } + return rc; +} + +/** + * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to post rpi header templates to the + * HBA consistent with the SLI-4 interface spec. This routine + * posts a PAGE_SIZE memory region to the port to hold up to + * PAGE_SIZE modulo 64 rpi context headers. + * + * Returns + * A nonzero rpi defined as rpi_base <= rpi < max_rpi if sucessful + * LPFC_RPI_ALLOC_ERROR if no rpis are available. + **/ +int +lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) +{ + int rpi; + uint16_t max_rpi, rpi_base, rpi_limit; + uint16_t rpi_remaining; + struct lpfc_rpi_hdr *rpi_hdr; + + max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; + rpi_base = phba->sli4_hba.max_cfg_param.rpi_base; + rpi_limit = phba->sli4_hba.next_rpi; + + /* + * The valid rpi range is not guaranteed to be zero-based. Start + * the search at the rpi_base as reported by the port. 
+ */ + spin_lock_irq(&phba->hbalock); + rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base); + if (rpi >= rpi_limit || rpi < rpi_base) + rpi = LPFC_RPI_ALLOC_ERROR; + else { + set_bit(rpi, phba->sli4_hba.rpi_bmask); + phba->sli4_hba.max_cfg_param.rpi_used++; + phba->sli4_hba.rpi_count++; + } + + /* + * Don't try to allocate more rpi header regions if the device's limit + * on available rpis has been exhausted. + */ + if ((rpi == LPFC_RPI_ALLOC_ERROR) && + (phba->sli4_hba.rpi_count >= max_rpi)) { + spin_unlock_irq(&phba->hbalock); + return rpi; + } + + /* + * If the driver is running low on rpi resources, allocate another + * page now. Note that the next_rpi value is used because + * it represents how many are actually in use whereas max_rpi notes + * the maximum number supported by the device. + */ + rpi_remaining = phba->sli4_hba.next_rpi - rpi_base - + phba->sli4_hba.rpi_count; + spin_unlock_irq(&phba->hbalock); + if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { + rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); + if (!rpi_hdr) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2002 Error Could not grow rpi " + "count\n"); + } else { + lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); + } + } + + return rpi; +} + +/** + * lpfc_sli4_free_rpi - Release an rpi for reuse. + * @phba: pointer to lpfc hba data structure. + * @rpi: rpi to release. + * + * This routine is invoked to release an rpi to the pool of + * available rpis maintained by the driver. + **/ +void +lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) +{ + spin_lock_irq(&phba->hbalock); + clear_bit(rpi, phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_count--; + phba->sli4_hba.max_cfg_param.rpi_used--; + spin_unlock_irq(&phba->hbalock); +} + +/** + * lpfc_sli4_remove_rpis - Remove the rpi bitmask region + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to free the bitmask memory region + * used to track rpi allocation. + **/ +void +lpfc_sli4_remove_rpis(struct lpfc_hba *phba) +{ + kfree(phba->sli4_hba.rpi_bmask); +} + +/** + * lpfc_sli4_resume_rpi - Resume an rpi on the port + * @ndlp: pointer to the node whose rpi is to be resumed. + * + * This routine is invoked to issue a RESUME_RPI mailbox command + * to the port for the given node. + **/ +int +lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp) +{ + LPFC_MBOXQ_t *mboxq; + struct lpfc_hba *phba = ndlp->phba; + int rc; + + /* The port is notified of the rpi resume via a mailbox command. */ + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + + /* Set up and issue the RESUME_RPI request. */ + lpfc_resume_rpi(mboxq, ndlp); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2010 Resume RPI Mailbox failed " + "status %d, mbxStatus x%x\n", rc, + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + mempool_free(mboxq, phba->mbox_mem_pool); + return -EIO; + } + return 0; +} + +/** + * lpfc_sli4_init_vpi - Initialize a vpi with the port + * @phba: pointer to lpfc hba data structure. + * @vpi: vpi value to activate with the port. + * + * This routine is invoked to activate a vpi with the + * port when the host intends to use vports with a + * nonzero vpi.
+ * + * Returns: + * 0 success + * -EINVAL, -ENOMEM or -EIO otherwise + **/ +int +lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) +{ + LPFC_MBOXQ_t *mboxq; + int rc = 0; + uint32_t mbox_tmo; + + if (vpi == 0) + return -EINVAL; + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) + return -ENOMEM; + lpfc_init_vpi(mboxq, vpi); + mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + if (rc != MBX_TIMEOUT) + mempool_free(mboxq, phba->mbox_mem_pool); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2022 INIT VPI Mailbox failed " + "status %d, mbxStatus x%x\n", rc, + bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + rc = -EIO; + } + return rc; +} + +/** + * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: Pointer to mailbox object. + * + * This is the completion handler for the nonembedded ADD_FCF_RECORD + * mailbox command. It checks the status reported in the mailbox + * subheader and then frees the mailbox resources. + **/ +static void +lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + void *virt_addr; + union lpfc_sli4_cfg_shdr *shdr; + uint32_t shdr_status, shdr_add_status; + + virt_addr = mboxq->sge_array->addr[0]; + /* The IOCTL status is embedded in the mailbox subheader. */ + shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + + if ((shdr_status || shdr_add_status) && + (shdr_status != STATUS_FCF_IN_USE)) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2558 ADD_FCF_RECORD mailbox failed with " + "status x%x add_status x%x\n", + shdr_status, shdr_add_status); + + lpfc_sli4_mbox_cmd_free(phba, mboxq); +} + +/** + * lpfc_sli4_add_fcf_record - Manually add an FCF Record. + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the initialized fcf record to add. + * + * This routine is invoked to manually add a single FCF record. The caller + * must pass a completely initialized FCF_Record. This routine takes + * care of the nonembedded mailbox operations. + **/ +int +lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) +{ + int rc = 0; + LPFC_MBOXQ_t *mboxq; + uint8_t *bytep; + void *virt_addr; + dma_addr_t phys_addr; + struct lpfc_mbx_sge sge; + uint32_t alloc_len, req_len; + uint32_t fcfindex; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2009 Failed to allocate mbox for ADD_FCF cmd\n"); + return -ENOMEM; + } + + req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + + sizeof(uint32_t); + + /* Allocate DMA memory and set up the non-embedded mailbox command */ + alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_ADD_FCF, + req_len, LPFC_SLI4_MBX_NEMBED); + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2523 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + + /* + * Get the first SGE entry from the non-embedded DMA memory. This + * routine only uses a single SGE.
+ */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); + if (unlikely(!mboxq->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2526 Failed to get the non-embedded SGE " + "virtual address\n"); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + virt_addr = mboxq->sge_array->addr[0]; + /* + * Configure the FCF record for FCFI 0. This is the driver's + * hardcoded default and gets used in nonFIP mode. + */ + fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); + bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); + + /* + * Copy the fcf_index and the FCF Record Data. The data starts after + * the FCoE header plus word10. The data copy needs to be endian + * correct. + */ + bytep += sizeof(uint32_t); + lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2515 ADD_FCF_RECORD mailbox failed with " + "status 0x%x\n", rc); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + rc = -EIO; + } else + rc = 0; + + return rc; +} + +/** + * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the fcf record to write the default data. + * @fcf_index: FCF table entry index. + * + * This routine is invoked to build the driver's default FCF record. The + * values used are hardcoded. This routine handles memory initialization. + * + **/ +void +lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, + struct fcf_record *fcf_record, + uint16_t fcf_index) +{ + memset(fcf_record, 0, sizeof(struct fcf_record)); + fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; + fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; + fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; + bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); + bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); + bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); + bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); + bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); + bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); + bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); + bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); + bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); + bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); + bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); + bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, + LPFC_FCF_FPMA | LPFC_FCF_SPMA); + /* Set the VLAN bit map */ + if (phba->valid_vlan) { + fcf_record->vlan_bitmap[phba->vlan_id / 8] + = 1 << (phba->vlan_id % 8); + } +} + +/** + * lpfc_sli4_read_fcf_record - Read an FCF record from the port. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read an FCF record from the + * device, starting with the given @fcf_index.
+ **/ +int +lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) +{ + int rc = 0, error; + LPFC_MBOXQ_t *mboxq; + void *virt_addr; + dma_addr_t phys_addr; + uint8_t *bytep; + struct lpfc_mbx_sge sge; + uint32_t alloc_len, req_len; + struct lpfc_mbx_read_fcf_tbl *read_fcf; + + mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mboxq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2000 Failed to allocate mbox for " + "READ_FCF cmd\n"); + return -ENOMEM; + } + + req_len = sizeof(struct fcf_record) + + sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); + + /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ + alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, + LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, + LPFC_SLI4_MBX_NEMBED); + + if (alloc_len < req_len) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0291 Allocated DMA memory size (x%x) is " + "less than the requested DMA memory " + "size (x%x)\n", alloc_len, req_len); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + + /* Get the first SGE entry from the non-embedded DMA memory. This + * routine only uses a single SGE. + */ + lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); + phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); + if (unlikely(!mboxq->sge_array)) { + lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, + "2527 Failed to get the non-embedded SGE " + "virtual address\n"); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return -ENOMEM; + } + virt_addr = mboxq->sge_array->addr[0]; + read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; + + /* Set up command fields */ + bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); + /* Perform necessary endian conversion */ + bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); + mboxq->vport = phba->pport; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + lpfc_sli4_mbox_cmd_free(phba, mboxq); + error = -EIO; + } else + error = 0; + return error; +} diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 883938652a6a..7d37eb7459bf 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd { LPFC_CTX_HOST } lpfc_ctx_cmd; +/* This structure is used to carry the needed response IOCB states */ +struct lpfc_sli4_rspiocb_info { + uint8_t hw_status; + uint8_t bfield; +#define LPFC_XB 0x1 +#define LPFC_PV 0x2 + uint8_t priority; + uint8_t reserved; +}; + /* This structure is used to handle IOCB requests / responses */ struct lpfc_iocbq { /* lpfc_iocbqs are used in double linked lists */ struct list_head list; struct list_head clist; uint16_t iotag; /* pre-assigned IO tag */ - uint16_t rsvd1; + uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. 
*/ IOCB_t iocb; /* IOCB cmd */ uint8_t retry; /* retry counter for IOCB cmd - if needed */ @@ -65,7 +75,7 @@ struct lpfc_iocbq { struct lpfc_iocbq *); void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - + struct lpfc_sli4_rspiocb_info sli4_info; }; #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ @@ -81,14 +91,18 @@ struct lpfc_iocbq { typedef struct lpfcMboxq { /* MBOXQs are used in single linked lists */ struct list_head list; /* ptr to next mailbox command */ - MAILBOX_t mb; /* Mailbox cmd */ - struct lpfc_vport *vport;/* virutal port pointer */ + union { + MAILBOX_t mb; /* Mailbox cmd */ + struct lpfc_mqe mqe; + } u; + struct lpfc_vport *vport;/* virtual port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); uint8_t mbox_flag; - + struct lpfc_mcqe mcqe; + struct lpfc_mbx_nembed_sge_virt *sge_array; } LPFC_MBOXQ_t; #define MBX_POLL 1 /* poll mailbox till command done, then @@ -230,10 +244,11 @@ struct lpfc_sli { /* Additional sli_flags */ #define LPFC_SLI_MBOX_ACTIVE 0x100 /* HBA mailbox is currently active */ -#define LPFC_SLI2_ACTIVE 0x200 /* SLI2 overlay in firmware is active */ +#define LPFC_SLI_ACTIVE 0x200 /* SLI in firmware is active */ #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ +#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ struct lpfc_sli_ring ring[LPFC_MAX_RING]; int fcp_ring; /* ring used for FCP initiator commands */ @@ -261,6 +276,8 @@ struct lpfc_sli { #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox command */ +#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox + command */ #define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write * or erase cmds. This is especially * long because of the potential of diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h new file mode 100644 index 000000000000..5196b46608d7 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -0,0 +1,467 @@ +/******************************************************************* + * This file is part of the Emulex Linux Device Driver for * + * Fibre Channel Host Bus Adapters. * + * Copyright (C) 2009 Emulex. All rights reserved. * + * EMULEX and SLI are trademarks of Emulex. * + * www.emulex.com * + * * + * This program is free software; you can redistribute it and/or * + * modify it under the terms of version 2 of the GNU General * + * Public License as published by the Free Software Foundation. * + * This program is distributed in the hope that it will be useful. * + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * + * TO BE LEGALLY INVALID. See the GNU General Public License for * + * more details, a copy of which can be found in the file COPYING * + * included with this package. 
* + *******************************************************************/ + +#define LPFC_ACTIVE_MBOX_WAIT_CNT 100 +#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 +#define LPFC_GET_QE_REL_INT 32 +#define LPFC_RPI_LOW_WATER_MARK 10 +/* Number of SGL entries that can be posted in a 4KB nonembedded mbox command */ +#define LPFC_NEMBED_MBOX_SGL_CNT 254 + +/* Multi-queue arrangement for fast-path FCP work queues */ +#define LPFC_FN_EQN_MAX 8 +#define LPFC_SP_EQN_DEF 1 +#define LPFC_FP_EQN_DEF 1 +#define LPFC_FP_EQN_MIN 1 +#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) + +#define LPFC_FN_WQN_MAX 32 +#define LPFC_SP_WQN_DEF 1 +#define LPFC_FP_WQN_DEF 4 +#define LPFC_FP_WQN_MIN 1 +#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF) + +/* + * Provide the default FCF Record attributes used by the driver + * when nonFIP mode is configured and there are no other default + * FCF Record attributes. + */ +#define LPFC_FCOE_FCF_DEF_INDEX 0 +#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF +#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF + +/* First 3 bytes of default FCF MAC are specified by FC_MAP */ +#define LPFC_FCOE_FCF_MAC3 0xFF +#define LPFC_FCOE_FCF_MAC4 0xFF +#define LPFC_FCOE_FCF_MAC5 0xFE +#define LPFC_FCOE_FCF_MAP0 0x0E +#define LPFC_FCOE_FCF_MAP1 0xFC +#define LPFC_FCOE_FCF_MAP2 0x00 +#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC +#define LPFC_FCOE_FKA_ADV_PER 0 +#define LPFC_FCOE_FIP_PRIORITY 0x80 + +enum lpfc_sli4_queue_type { + LPFC_EQ, + LPFC_GCQ, + LPFC_MCQ, + LPFC_WCQ, + LPFC_RCQ, + LPFC_MQ, + LPFC_WQ, + LPFC_HRQ, + LPFC_DRQ +}; + +/* The queue sub-type defines the functional purpose of the queue */ +enum lpfc_sli4_queue_subtype { + LPFC_NONE, + LPFC_MBOX, + LPFC_FCP, + LPFC_ELS, + LPFC_USOL +}; + +union sli4_qe { + void *address; + struct lpfc_eqe *eqe; + struct lpfc_cqe *cqe; + struct lpfc_mcqe *mcqe; + struct lpfc_wcqe_complete *wcqe_complete; + struct lpfc_wcqe_release *wcqe_release; + struct sli4_wcqe_xri_aborted *wcqe_xri_aborted; + struct lpfc_rcqe_complete *rcqe_complete; + struct lpfc_mqe *mqe; + union lpfc_wqe *wqe; + struct lpfc_rqe *rqe; +}; + +struct lpfc_queue { + struct list_head list; + enum lpfc_sli4_queue_type type; + enum lpfc_sli4_queue_subtype subtype; + struct lpfc_hba *phba; + struct list_head child_list; + uint32_t entry_count; /* Number of entries to support on the queue */ + uint32_t entry_size; /* Size of each queue entry.
*/ + uint32_t queue_id; /* Queue ID assigned by the hardware */ + struct list_head page_list; + uint32_t page_count; /* Number of pages allocated for this queue */ + + uint32_t host_index; /* The host's index for putting or getting */ + uint32_t hba_index; /* The last known hba index for get or put */ + union sli4_qe qe[1]; /* array to index entries (must be last) */ +}; + +struct lpfc_cq_event { + struct list_head list; + union { + struct lpfc_mcqe mcqe_cmpl; + struct lpfc_acqe_link acqe_link; + struct lpfc_acqe_fcoe acqe_fcoe; + struct lpfc_acqe_dcbx acqe_dcbx; + struct lpfc_rcqe rcqe_cmpl; + struct sli4_wcqe_xri_aborted wcqe_axri; + } cqe; +}; + +struct lpfc_sli4_link { + uint8_t speed; + uint8_t duplex; + uint8_t status; + uint8_t physical; + uint8_t fault; +}; + +struct lpfc_fcf { + uint8_t fabric_name[8]; + uint8_t mac_addr[6]; + uint16_t fcf_indx; + uint16_t fcfi; + uint32_t fcf_flag; +#define FCF_AVAILABLE 0x01 /* FCF available for discovery */ +#define FCF_REGISTERED 0x02 /* FCF registered with FW */ +#define FCF_DISCOVERED 0x04 /* FCF discovery started */ +#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */ +#define FCF_IN_USE 0x10 /* At least one discovery completed */ +#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */ + uint32_t priority; + uint32_t addr_mode; + uint16_t vlan_id; +}; + +#define LPFC_REGION23_SIGNATURE "RG23" +#define LPFC_REGION23_VERSION 1 +#define LPFC_REGION23_LAST_REC 0xff +struct lpfc_fip_param_hdr { + uint8_t type; +#define FCOE_PARAM_TYPE 0xA0 + uint8_t length; +#define FCOE_PARAM_LENGTH 2 + uint8_t parm_version; +#define FIPP_VERSION 0x01 + uint8_t parm_flags; +#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 +#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 +#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags +#define FIPP_MODE_ON 0x2 +#define FIPP_MODE_OFF 0x0 +#define FIPP_VLAN_VALID 0x1 +}; + +struct lpfc_fcoe_params { + uint8_t fc_map[3]; + uint8_t reserved1; + uint16_t vlan_tag; + uint8_t reserved[2]; +}; + +struct lpfc_fcf_conn_hdr { + uint8_t type; +#define FCOE_CONN_TBL_TYPE 0xA1 + uint8_t length; /* words */ + uint8_t reserved[2]; +}; + +struct lpfc_fcf_conn_rec { + uint16_t flags; +#define FCFCNCT_VALID 0x0001 +#define FCFCNCT_BOOT 0x0002 +#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */ +#define FCFCNCT_FBNM_VALID 0x0008 +#define FCFCNCT_SWNM_VALID 0x0010 +#define FCFCNCT_VLAN_VALID 0x0020 +#define FCFCNCT_AM_VALID 0x0040 +#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */ +#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */ + + uint16_t vlan_tag; + uint8_t fabric_name[8]; + uint8_t switch_name[8]; +}; + +struct lpfc_fcf_conn_entry { + struct list_head list; + struct lpfc_fcf_conn_rec conn_rec; +}; + +/* + * Define the host's bootstrap mailbox. This structure contains + * the member attributes needed to create, use, and destroy the + * bootstrap mailbox region. + * + * The macro definitions for the bmbx data structure are defined + * in lpfc_hw4.h with the register definition.
+ */ +struct lpfc_bmbx { + struct lpfc_dmabuf *dmabuf; + struct dma_address dma_address; + void *avirt; + dma_addr_t aphys; + uint32_t bmbx_size; +}; + +#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4B + +#define LPFC_EQE_SIZE_4B 4 +#define LPFC_EQE_SIZE_16B 16 +#define LPFC_CQE_SIZE 16 +#define LPFC_WQE_SIZE 64 +#define LPFC_MQE_SIZE 256 +#define LPFC_RQE_SIZE 8 + +#define LPFC_EQE_DEF_COUNT 1024 +#define LPFC_CQE_DEF_COUNT 256 +#define LPFC_WQE_DEF_COUNT 64 +#define LPFC_MQE_DEF_COUNT 16 +#define LPFC_RQE_DEF_COUNT 512 + +#define LPFC_QUEUE_NOARM false +#define LPFC_QUEUE_REARM true + + +/* + * SLI4 CT field defines + */ +#define SLI4_CT_RPI 0 +#define SLI4_CT_VPI 1 +#define SLI4_CT_VFI 2 +#define SLI4_CT_FCFI 3 + +#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 + +/* + * SLI4 specific data structures + */ +struct lpfc_max_cfg_param { + uint16_t max_xri; + uint16_t xri_base; + uint16_t xri_used; + uint16_t max_rpi; + uint16_t rpi_base; + uint16_t rpi_used; + uint16_t max_vpi; + uint16_t vpi_base; + uint16_t vpi_used; + uint16_t max_vfi; + uint16_t vfi_base; + uint16_t vfi_used; + uint16_t max_fcfi; + uint16_t fcfi_base; + uint16_t fcfi_used; + uint16_t max_eq; + uint16_t max_rq; + uint16_t max_cq; + uint16_t max_wq; +}; + +struct lpfc_hba; +/* SLI4 HBA multi-fcp queue handler struct */ +struct lpfc_fcp_eq_hdl { + uint32_t idx; + struct lpfc_hba *phba; +}; + +/* SLI4 HBA data structure entries */ +struct lpfc_sli4_hba { + void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR0, config space registers */ + void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR1, control registers */ + void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for + PCI BAR2, doorbell registers */ + /* BAR0 PCI config space register memory map */ + void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ + void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ + void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ + void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ +#define LPFC_ONLINE_NERR 0xFFFFFFFF + void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ + /* BAR1 FCoE function CSR register memory map */ + void __iomem *STAregaddr; /* Address to HST_STATE register */ + void __iomem *ISRregaddr; /* Address to HST_ISR register */ + void __iomem *IMRregaddr; /* Address to HST_IMR register */ + void __iomem *ISCRregaddr; /* Address to HST_ISCR register */ + /* BAR2 VF-0 doorbell register memory map */ + void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */ + void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */ + void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */ + void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ + void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ + + struct msix_entry *msix_entries; + uint32_t cfg_eqn; + struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ + /* Pointers to the constructed SLI4 queues */ + struct lpfc_queue **fp_eq; /* Fast-path event queue */ + struct lpfc_queue *sp_eq; /* Slow-path event queue */ + struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ + struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ + struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ + struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ + struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ + struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ + struct
lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ + struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ + struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */ + + /* Setup information for various queue parameters */ + int eq_esize; + int eq_ecount; + int cq_esize; + int cq_ecount; + int wq_esize; + int wq_ecount; + int mq_esize; + int mq_ecount; + int rq_esize; + int rq_ecount; +#define LPFC_SP_EQ_MAX_INTR_SEC 10000 +#define LPFC_FP_EQ_MAX_INTR_SEC 10000 + + uint32_t intr_enable; + struct lpfc_bmbx bmbx; + struct lpfc_max_cfg_param max_cfg_param; + uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ + uint16_t next_rpi; + uint16_t scsi_xri_max; + uint16_t scsi_xri_cnt; + struct list_head lpfc_free_sgl_list; + struct list_head lpfc_sgl_list; + struct lpfc_sglq **lpfc_els_sgl_array; + struct list_head lpfc_abts_els_sgl_list; + struct lpfc_scsi_buf **lpfc_scsi_psb_array; + struct list_head lpfc_abts_scsi_buf_list; + uint32_t total_sglq_bufs; + struct lpfc_sglq **lpfc_sglq_active_list; + struct list_head lpfc_rpi_hdr_list; + unsigned long *rpi_bmask; + uint16_t rpi_count; + struct lpfc_sli4_flags sli4_flags; + struct list_head sp_rspiocb_work_queue; + struct list_head sp_cqe_event_pool; + struct list_head sp_asynce_work_queue; + struct list_head sp_fcp_xri_aborted_work_queue; + struct list_head sp_els_xri_aborted_work_queue; + struct list_head sp_unsol_work_queue; + struct lpfc_sli4_link link_state; + spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ + spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ +}; + +enum lpfc_sge_type { + GEN_BUFF_TYPE, + SCSI_BUFF_TYPE +}; + +struct lpfc_sglq { + /* lpfc_sglqs are used in double linked lists */ + struct list_head list; + struct list_head clist; + enum lpfc_sge_type buff_type; /* is this a scsi sgl */ + uint16_t iotag; /* pre-assigned IO tag */ + uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ + struct sli4_sge *sgl; /* pre-assigned SGL */ + void *virt; /* virtual address. 
*/ + dma_addr_t phys; /* physical address */ +}; + +struct lpfc_rpi_hdr { + struct list_head list; + uint32_t len; + struct lpfc_dmabuf *dmabuf; + uint32_t page_count; + uint32_t start_rpi; +}; + +/* + * SLI4 specific function prototypes + */ +int lpfc_pci_function_reset(struct lpfc_hba *); +int lpfc_sli4_hba_setup(struct lpfc_hba *); +int lpfc_sli4_hba_down(struct lpfc_hba *); +int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, + uint8_t, uint32_t, bool); +void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); +void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); +void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, + struct lpfc_mbx_sge *); + +void lpfc_sli4_hba_reset(struct lpfc_hba *); +struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, + uint32_t); +void lpfc_sli4_queue_free(struct lpfc_queue *); +uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); +uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t, uint32_t); +uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t); +uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, uint32_t); +uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *, struct lpfc_queue *, uint32_t); +uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *); +uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, + struct lpfc_queue *); +int lpfc_sli4_queue_setup(struct lpfc_hba *); +void lpfc_sli4_queue_unset(struct lpfc_hba *); +int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); +int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); +int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *); +uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); +int lpfc_sli4_post_async_mbox(struct lpfc_hba *); +int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); +int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); +struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); +struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); +void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); +void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); +int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *); +int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *); +int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *); +struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); +void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); +int lpfc_sli4_alloc_rpi(struct lpfc_hba *); +void lpfc_sli4_free_rpi(struct lpfc_hba *, int); +void lpfc_sli4_remove_rpis(struct lpfc_hba *); +void lpfc_sli4_async_event_proc(struct lpfc_hba *); +int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); +void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); +void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); +void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, + struct sli4_wcqe_xri_aborted *); +void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, + struct sli4_wcqe_xri_aborted *); +int lpfc_sli4_brdreset(struct lpfc_hba *); +int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); +void 
lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); +int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); +int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t); +uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); +uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); +void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); +int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); +void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); +int lpfc_sli4_post_status_check(struct lpfc_hba *); +uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); + diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index e599519e3078..6b8a148f0a55 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.1" +#define LPFC_DRIVER_VERSION "8.3.2" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 917ad56b0aff..a6313ee84ac5 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -32,8 +32,10 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> +#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" +#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba) vpi = 0; else set_bit(vpi, phba->vpi_bmask); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->sli4_hba.max_cfg_param.vpi_used++; spin_unlock_irq(&phba->hbalock); return vpi; } @@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba) static void lpfc_free_vpi(struct lpfc_hba *phba, int vpi) { + if (vpi == 0) + return; spin_lock_irq(&phba->hbalock); clear_bit(vpi, phba->vpi_bmask); + if (phba->sli_rev == LPFC_SLI_REV4) + phba->sli4_hba.max_cfg_param.vpi_used--; spin_unlock_irq(&phba->hbalock); } @@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) if (!pmb) { return -ENOMEM; } - mb = &pmb->mb; + mb = &pmb->u.mb; lpfc_read_sparam(phba, pmb, vport->vpi); /* @@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport) (vport->fc_flag & wait_flags) || ((vport->port_state > LPFC_VPORT_FAILED) && (vport->port_state < LPFC_VPORT_READY))) { - lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1833 Vport discovery quiesce Wait:" - " vpi x%x state x%x fc_flags x%x" + " state x%x fc_flags x%x" " num_nodes x%x, waiting 1000 msecs" " total wait msecs x%x\n", - vport->vpi, vport->port_state, - vport->fc_flag, vport->num_disc_nodes, + vport->port_state, vport->fc_flag, + vport->num_disc_nodes, jiffies_to_msecs(jiffies - start_time)); msleep(1000); } else { /* Base case. Wait variants satisfied. 
Break out */ - lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, "1834 Vport discovery quiesced:" - " vpi x%x state x%x fc_flags x%x" + " state x%x fc_flags x%x" " wait msecs x%x\n", - vport->vpi, vport->port_state, - vport->fc_flag, + vport->port_state, vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); break; @@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport) } if (time_after(jiffies, wait_time_max)) - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, "1835 Vport discovery quiesce failed:" - " vpi x%x state x%x fc_flags x%x" - " wait msecs x%x\n", - vport->vpi, vport->port_state, - vport->fc_flag, + " state x%x fc_flags x%x wait msecs x%x\n", + vport->port_state, vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); } @@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) goto error_out; } + /* + * In SLI4, the vpi must be activated before it can be used + * by the port. + */ + if (phba->sli_rev == LPFC_SLI_REV4) { + rc = lpfc_sli4_init_vpi(phba, vpi); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, + "1838 Failed to INIT_VPI on vpi %d " + "status %d\n", vpi, rc); + rc = VPORT_NORESOURCES; + lpfc_free_vpi(phba, vpi); + goto error_out; + } + } /* Assign an unused board number */ if ((instance = lpfc_get_instance()) < 0) { @@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) "physical host\n"); return VPORT_ERROR; } + + /* If the vport is a static vport, fail the deletion. */ + if ((vport->vport_flag & STATIC_VPORT) && + !(phba->pport->load_flag & FC_UNLOADING)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + "1837 vport_delete failed: Cannot delete " + "static vport.\n"); + return VPORT_ERROR; + } + + /* * If we are not unloading the driver then prevent the vport_delete * from happening until after this vport's discovery is finished.
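The lpfc_create_vport_work_array()/lpfc_destroy_vport_work_array() hunks that follow widen the snapshot array bound from max_vpi to max_vports. The idiom behind them is worth spelling out: while the host lock is held, copy a reference-counted pointer to each active vport into a NULL-terminated array sized for the configured maximum, then let callers iterate with the lock dropped and release every reference afterwards. Below is a minimal user-space sketch of that pattern; struct vport, struct hba, the refcount field, and the vport_list array are simplified stand-ins for illustration, not the driver's real definitions.

#include <stdlib.h>

/* Simplified stand-ins for the kernel structures (illustrative only). */
struct vport { int refcount; };
struct hba {
	int max_vports;			/* configured maximum */
	int nr_vports;			/* currently active */
	struct vport **vport_list;	/* a locked list in the real driver */
};

/*
 * Snapshot the active vports: size the array for max_vports plus a NULL
 * sentinel and take a reference on each entry while the (elided) lock is
 * held, so entries cannot vanish while the caller walks the array.
 */
static struct vport **create_work_array(struct hba *h)
{
	struct vport **vports = calloc(h->max_vports + 1, sizeof(*vports));
	int i;

	if (!vports)
		return NULL;
	/* the driver takes phba->hbalock here */
	for (i = 0; i < h->nr_vports && i <= h->max_vports; i++) {
		h->vport_list[i]->refcount++;	/* scsi_host_get() analogue */
		vports[i] = h->vport_list[i];
	}
	/* the driver drops phba->hbalock here */
	return vports;				/* trailing slots stay NULL */
}

/* Walk to the NULL sentinel, dropping the references taken above. */
static void destroy_work_array(struct hba *h, struct vport **vports)
{
	int i;

	if (!vports)
		return;
	for (i = 0; vports[i] != NULL && i <= h->max_vports; i++)
		vports[i]->refcount--;		/* scsi_host_put() analogue */
	free(vports);
}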
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba) struct lpfc_vport *port_iterator; struct lpfc_vport **vports; int index = 0; - vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), + vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *), GFP_KERNEL); if (vports == NULL) return NULL; @@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) int i; if (vports == NULL) return; - for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) + for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++) scsi_host_put(lpfc_shost_from_vport(vports[i])); kfree(vports); } diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 36b1d1052ba1..286c185fa9e4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -61,6 +61,7 @@ #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport_sas.h> #include <scsi/scsi_dbg.h> +#include <scsi/scsi_eh.h> #include "mpt2sas_debug.h" @@ -68,10 +69,10 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "01.100.02.00" +#define MPT2SAS_DRIVER_VERSION "01.100.03.00" #define MPT2SAS_MAJOR_VERSION 01 #define MPT2SAS_MINOR_VERSION 100 -#define MPT2SAS_BUILD_VERSION 02 +#define MPT2SAS_BUILD_VERSION 03 #define MPT2SAS_RELEASE_VERSION 00 /* diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index ba6ab170bdf0..14e473d1fa7b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait) } /** - * _ctl_do_task_abort - assign an active smid to the abort_task + * _ctl_set_task_mid - assign an active smid to a tm request * @ioc: per adapter object * @karg - (struct mpt2_ioctl_command) * @tm_request - pointer to mf from user space @@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait) * during failure, the reply frame is filled.
*/ static int -_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, +_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, Mpi2SCSITaskManagementRequest_t *tm_request) { u8 found = 0; @@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, Mpi2SCSITaskManagementReply_t *tm_reply; u32 sz; u32 lun; + char *desc = NULL; + + if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + desc = "abort_task"; + else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + desc = "query_task"; + else + return 0; lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); @@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); if (!found) { - dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " - "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name, - tm_request->DevHandle, lun)); + dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " + "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name, + desc, tm_request->DevHandle, lun)); tm_reply = ioc->ctl_cmds.reply; tm_reply->DevHandle = tm_request->DevHandle; tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; - tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; + tm_reply->TaskType = tm_request->TaskType; tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; tm_reply->VP_ID = tm_request->VP_ID; tm_reply->VF_ID = tm_request->VF_ID; @@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, return 1; } - dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " - "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name, - tm_request->DevHandle, lun, tm_request->TaskMID)); + dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " + "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, + desc, tm_request->DevHandle, lun, tm_request->TaskMID)); return 0; } @@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, (Mpi2SCSITaskManagementRequest_t *)mpi_request; if (tm_request->TaskType == - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { - if (_ctl_do_task_abort(ioc, &karg, tm_request)) { + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + tm_request->TaskType == + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { + if (_ctl_set_task_mid(ioc, &karg, tm_request)) { mpt2sas_base_free_smid(ioc, smid); goto out; } diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index e3a7967259e7..2a01a5f2a84d 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = { MODULE_DEVICE_TABLE(pci, scsih_pci_table); /** - * scsih_set_debug_level - global setting of ioc->logging_level. + * _scsih_set_debug_level - global setting of ioc->logging_level. * * Note: The logging levels are defined in mpt2sas_debug.h. 
*/ static int -scsih_set_debug_level(const char *val, struct kernel_param *kp) +_scsih_set_debug_level(const char *val, struct kernel_param *kp) { int ret = param_set_int(val, kp); struct MPT2SAS_ADAPTER *ioc; @@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp) ioc->logging_level = logging_level; return 0; } -module_param_call(logging_level, scsih_set_debug_level, param_get_int, +module_param_call(logging_level, _scsih_set_debug_level, param_get_int, &logging_level, 0644); /** @@ -884,6 +884,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id, } /** + * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun + * @ioc: per adapter object + * @id: target id + * @lun: lun number + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id:lun in the scsi_lookup array, + * returning 1 if found. + */ +static u8 +_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + u8 found; + unsigned long flags; + int i; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + found = 0; + for (i = 0 ; i < ioc->request_depth; i++) { + if (ioc->scsi_lookup[i].scmd && + (ioc->scsi_lookup[i].scmd->device->id == id && + ioc->scsi_lookup[i].scmd->device->channel == channel && + ioc->scsi_lookup[i].scmd->device->lun == lun)) { + found = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return found; +} + +/** * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) * @ioc: per adapter object * @smid: system request message index @@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, } /** - * scsih_change_queue_depth - setting device queue depth + * _scsih_change_queue_depth - setting device queue depth * @sdev: scsi device struct * @qdepth: requested queue depth * * Returns queue depth. */ static int -scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth; @@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) } /** - * scsih_change_queue_depth - changing device queue tag type + * _scsih_change_queue_type - changing device queue tag type * @sdev: scsi device struct * @tag_type: requested tag type * * Returns queue tag type. */ static int -scsih_change_queue_type(struct scsi_device *sdev, int tag_type) +_scsih_change_queue_type(struct scsi_device *sdev, int tag_type) { if (sdev->tagged_supported) { scsi_set_tag_type(sdev, tag_type); @@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type) } /** - * scsih_target_alloc - target add routine + * _scsih_target_alloc - target add routine * @starget: scsi target struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_target_alloc(struct scsi_target *starget) +_scsih_target_alloc(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget) } /** - * scsih_target_destroy - target destroy routine + * _scsih_target_destroy - target destroy routine * @starget: scsi target struct * * Returns nothing.
*/ static void -scsih_target_destroy(struct scsi_target *starget) +_scsih_target_destroy(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget) } /** - * scsih_slave_alloc - device add routine + * _scsih_slave_alloc - device add routine * @sdev: scsi device struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_slave_alloc(struct scsi_device *sdev) +_scsih_slave_alloc(struct scsi_device *sdev) { struct Scsi_Host *shost; struct MPT2SAS_ADAPTER *ioc; @@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev) } /** - * scsih_slave_destroy - device destroy routine + * _scsih_slave_destroy - device destroy routine * @sdev: scsi device struct * * Returns nothing. */ static void -scsih_slave_destroy(struct scsi_device *sdev) +_scsih_slave_destroy(struct scsi_device *sdev) { struct MPT2SAS_TARGET *sas_target_priv_data; struct scsi_target *starget; @@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev) } /** - * scsih_display_sata_capabilities - sata capabilities + * _scsih_display_sata_capabilities - sata capabilities * @ioc: per adapter object * @sas_device: the sas_device object * @sdev: scsi device struct */ static void -scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, +_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, struct _sas_device *sas_device, struct scsi_device *sdev) { Mpi2ConfigReply_t mpi_reply; @@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, } /** - * scsih_slave_configure - device configure routine. + * _scsih_slave_configure - device configure routine. * @sdev: scsi device struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -scsih_slave_configure(struct scsi_device *sdev) +_scsih_slave_configure(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev) r_level, raid_device->handle, (unsigned long long)raid_device->wwid, raid_device->num_pds, ds); - scsih_change_queue_depth(sdev, qdepth); + _scsih_change_queue_depth(sdev, qdepth); return 0; } @@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev) sas_device->slot); if (!ssp_target) - scsih_display_sata_capabilities(ioc, sas_device, sdev); + _scsih_display_sata_capabilities(ioc, sas_device, sdev); } - scsih_change_queue_depth(sdev, qdepth); + _scsih_change_queue_depth(sdev, qdepth); if (ssp_target) sas_read_port_mode_page(sdev); @@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev) } /** - * scsih_bios_param - fetch head, sector, cylinder info for a disk + * _scsih_bios_param - fetch head, sector, cylinder info for a disk * @sdev: scsi device struct * @bdev: pointer to block device context * @capacity: device size (in 512 byte sectors) @@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev) * Return nothing. 
*/ static int -scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, +_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int params[]) { int heads; @@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code) } /** - * scsih_tm_done - tm completion routine + * _scsih_tm_done - tm completion routine * @ioc: per adapter object * @smid: system request message index * @VF_ID: virtual function id @@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code) * Return nothing. */ static void -scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) +_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) { MPI2DefaultReply_t *mpi_reply; @@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun, } /** - * scsih_abort - eh threads main abort routine + * _scsih_abort - eh threads main abort routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -scsih_abort(struct scsi_cmnd *scmd) +_scsih_abort(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd) return r; } +/** + * _scsih_dev_reset - eh threads main device reset routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if the device was reset else FAILED + */ +static int +_scsih_dev_reset(struct scsi_cmnd *scmd) +{ + struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT2SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device; + unsigned long flags; + u16 handle; + int r; + + printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n", + ioc->name, scmd); + scsi_print_command(scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", + ioc->name, scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, + sas_device_priv_data->sas_target->handle); + if (sas_device) + handle = sas_device->volume_handle; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + mutex_lock(&ioc->tm_cmds.mutex); + mpt2sas_scsih_issue_tm(ioc, handle, 0, + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun, + 30); + + /* + * sanity check to see whether all commands to this device have + * been completed + */ + if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id, + scmd->device->lun, scmd->device->channel)) + r = FAILED; + else + r = SUCCESS; + ioc->tm_cmds.status = MPT2_CMD_NOT_USED; + mutex_unlock(&ioc->tm_cmds.mutex); + + out: + printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n", + ioc->name, ((r == SUCCESS) ?
"SUCCESS" : "FAILED"), scmd); + return r; +} /** - * scsih_dev_reset - eh threads main device reset routine + * _scsih_target_reset - eh threads main target reset routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -scsih_dev_reset(struct scsi_cmnd *scmd) +_scsih_target_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd) sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { - printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", + printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n", ioc->name, scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); @@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd) } /** - * scsih_abort - eh threads main host reset routine + * _scsih_abort - eh threads main host reset routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -scsih_host_reset(struct scsi_cmnd *scmd) +_scsih_host_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); int r, retval; @@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) } /** - * scsih_qcmd - main scsi request entry point + * _scsih_setup_eedp - setup MPI request for EEDP transfer + * @scmd: pointer to scsi command object + * @mpi_request: pointer to the SCSI_IO reqest message frame + * + * Supporting protection 1 and 3. + * + * Returns nothing + */ +static void +_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) +{ + u16 eedp_flags; + unsigned char prot_op = scsi_get_prot_op(scmd); + unsigned char prot_type = scsi_get_prot_type(scmd); + + if (prot_type == SCSI_PROT_DIF_TYPE0 || + prot_type == SCSI_PROT_DIF_TYPE2 || + prot_op == SCSI_PROT_NORMAL) + return; + + if (prot_op == SCSI_PROT_READ_STRIP) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + else if (prot_op == SCSI_PROT_WRITE_INSERT) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + else + return; + + mpi_request->EEDPBlockSize = scmd->device->sector_size; + + switch (prot_type) { + case SCSI_PROT_DIF_TYPE1: + + /* + * enable ref/guard checking + * auto increment ref tag + */ + mpi_request->EEDPFlags = eedp_flags | + MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + mpi_request->CDB.EEDP32.PrimaryReferenceTag = + cpu_to_be32(scsi_get_lba(scmd)); + + break; + + case SCSI_PROT_DIF_TYPE3: + + /* + * enable guard checking + */ + mpi_request->EEDPFlags = eedp_flags | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + + break; + } +} + +/** + * _scsih_eedp_error_handling - return sense code for EEDP errors + * @scmd: pointer to scsi command object + * @ioc_status: ioc status + * + * Returns nothing + */ +static void +_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) +{ + u8 ascq; + u8 sk; + u8 host_byte; + + switch (ioc_status) { + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + ascq = 0x01; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + ascq = 0x02; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + ascq = 0x03; + break; + default: + ascq = 0x00; + break; + } + + if (scmd->sc_data_direction == DMA_TO_DEVICE) { + sk = ILLEGAL_REQUEST; + host_byte = DID_ABORT; + } else { + sk = ABORTED_COMMAND; + host_byte = DID_OK; + } + + scsi_build_sense_buffer(0, 
scmd->sense_buffer, sk, 0x10, ascq); + scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) | + SAM_STAT_CHECK_CONDITION; +} + +/** + * _scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object * @done: function pointer to be invoked on completion * @@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int -scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) } mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); + _scsih_setup_eedp(scmd, mpi_request); mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) @@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: desc_ioc_state = "scsi ext terminated"; break; + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc_ioc_state = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc_ioc_state = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc_ioc_state = "eedp app tag error"; + break; default: desc_ioc_state = "unknown"; break; @@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) } /** - * scsih_io_done - scsi request callback + * _scsih_io_done - scsi request callback * @ioc: per adapter object * @smid: system request message index * @VF_ID: virtual function id @@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) * Return nothing. 
*/ static void -scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) +_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) { Mpi2SCSIIORequest_t *mpi_request; Mpi2SCSIIOReply_t *mpi_reply; @@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) scmd->result = DID_RESET << 16; break; + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + _scsih_eedp_error_handling(scmd, ioc_status); + break; case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_INVALID_FUNCTION: case MPI2_IOCSTATUS_INVALID_SGL: @@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = { .module = THIS_MODULE, .name = "Fusion MPT SAS Host", .proc_name = MPT2SAS_DRIVER_NAME, - .queuecommand = scsih_qcmd, - .target_alloc = scsih_target_alloc, - .slave_alloc = scsih_slave_alloc, - .slave_configure = scsih_slave_configure, - .target_destroy = scsih_target_destroy, - .slave_destroy = scsih_slave_destroy, - .change_queue_depth = scsih_change_queue_depth, - .change_queue_type = scsih_change_queue_type, - .eh_abort_handler = scsih_abort, - .eh_device_reset_handler = scsih_dev_reset, - .eh_host_reset_handler = scsih_host_reset, - .bios_param = scsih_bios_param, + .queuecommand = _scsih_qcmd, + .target_alloc = _scsih_target_alloc, + .slave_alloc = _scsih_slave_alloc, + .slave_configure = _scsih_slave_configure, + .target_destroy = _scsih_target_destroy, + .slave_destroy = _scsih_slave_destroy, + .change_queue_depth = _scsih_change_queue_depth, + .change_queue_type = _scsih_change_queue_type, + .eh_abort_handler = _scsih_abort, + .eh_device_reset_handler = _scsih_dev_reset, + .eh_target_reset_handler = _scsih_target_reset, + .eh_host_reset_handler = _scsih_host_reset, + .bios_param = _scsih_bios_param, .can_queue = 1, .this_id = -1, .sg_tablesize = MPT2SAS_SG_DEPTH, @@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, } /** - * scsih_remove - detach and remove add host + * _scsih_remove - detach and remove the scsi host * @pdev: PCI device struct * * Return nothing. */ static void __devexit -scsih_remove(struct pci_dev *pdev) +_scsih_remove(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc) } /** - * scsih_probe - attach and add scsi host + * _scsih_probe - attach and add scsi host + * @pdev: PCI device struct * @id: pci device id * * Returns 0 success, anything else error. */ static int -scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) +_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct MPT2SAS_ADAPTER *ioc; struct Scsi_Host *shost; @@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_add_shost_fail; } + scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION); + /* event thread */ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), "fw_event%d", ioc->id); @@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) #ifdef CONFIG_PM /** - * scsih_suspend - power management suspend main entry point + * _scsih_suspend - power management suspend main entry point * @pdev: PCI device struct * @state: PM state change to (usually PCI_D3) * * Returns 0 success, anything else error.
*/ static int -scsih_suspend(struct pci_dev *pdev, pm_message_t state) +_scsih_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * scsih_resume - power management resume main entry point + * _scsih_resume - power management resume main entry point * @pdev: PCI device struct * * Returns 0 success, anything else error. */ static int -scsih_resume(struct pci_dev *pdev) +_scsih_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5599,22 +5824,22 @@ static struct pci_driver scsih_driver = { .name = MPT2SAS_DRIVER_NAME, .id_table = scsih_pci_table, - .probe = scsih_probe, - .remove = __devexit_p(scsih_remove), + .probe = _scsih_probe, + .remove = __devexit_p(_scsih_remove), #ifdef CONFIG_PM - .suspend = scsih_suspend, - .resume = scsih_resume, + .suspend = _scsih_suspend, + .resume = _scsih_resume, #endif }; /** - * scsih_init - main entry point for this driver. + * _scsih_init - main entry point for this driver. * * Returns 0 success, anything else error. */ static int __init -scsih_init(void) +_scsih_init(void) { int error; @@ -5630,10 +5855,10 @@ scsih_init(void) mpt2sas_base_initialize_callback_handler(); /* queuecommand callback handler */ - scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done); + scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done); /* task management callback handler */ - tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done); + tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done); /* base internal commands callback handler */ base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); @@ -5659,12 +5884,12 @@ } /** - * scsih_exit - exit point for this driver (when it is a module). + * _scsih_exit - exit point for this driver (when it is a module). * * Return nothing. */ static void __exit -scsih_exit(void) +_scsih_exit(void) { printk(KERN_INFO "mpt2sas version %s unloading\n", MPT2SAS_DRIVER_VERSION); @@ -5682,5 +5907,5 @@ scsih_exit(void) mpt2sas_ctl_exit(); } -module_init(scsih_init); -module_exit(scsih_exit); +module_init(_scsih_init); +module_exit(_scsih_exit); diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 5c65da519e39..686695b155c7 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -264,7 +264,7 @@ struct rep_manu_reply{ }; /** - * transport_expander_report_manufacture - obtain SMP report_manufacture + * _transport_expander_report_manufacture - obtain SMP report_manufacture * @ioc: per adapter object * @sas_address: expander sas address * @edev: the sas_expander_device object * @@ -274,7 +274,7 @@ struct rep_manu_reply{ * Returns 0 for success, non-zero for failure.
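Aside: the init hunk above shows the dispatch scheme used throughout mpt2sas: each consumer registers a reply handler with the base driver and gets back a callback index that is later stamped into requests, so the interrupt path can route firmware replies without knowing the callers. A generic sketch of that pattern (struct adapter, MAX_CB and the function names are stand-ins, not the mpt2sas symbols):

#include <linux/types.h>

struct adapter;		/* stand-in for struct MPT2SAS_ADAPTER */
typedef void (*reply_cb_t)(struct adapter *ioc, u16 smid, u8 vf_id,
	u32 reply);

#define MAX_CB 16
static reply_cb_t cb_table[MAX_CB];

/* Hand out the first free index; 0 stays reserved as "invalid". */
static u8 register_reply_cb(reply_cb_t fn)
{
	u8 i;

	for (i = 1; i < MAX_CB; i++)
		if (!cb_table[i]) {
			cb_table[i] = fn;
			return i;
		}
	return 0;
}

/* Interrupt path: route a reply to whoever owns its callback index. */
static void dispatch_reply(struct adapter *ioc, u8 cb_idx, u16 smid,
	u8 vf_id, u32 reply)
{
	if (cb_idx && cb_idx < MAX_CB && cb_table[cb_idx])
		cb_table[cb_idx](ioc, smid, vf_id, reply);
}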
*/ static int -transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, +_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, struct sas_expander_device *edev) { Mpi2SmpPassthroughRequest_t *mpi_request; @@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || mpt2sas_port->remote_identify.device_type == MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) - transport_expander_report_manufacture(ioc, + _transport_expander_report_manufacture(ioc, mpt2sas_port->remote_identify.sas_address, rphy_to_expander_device(rphy)); @@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy) } /** - * transport_get_linkerrors - + * _transport_get_linkerrors - * @phy: The sas phy object * * Only support sas_host direct attached phys. @@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy) * */ static int -transport_get_linkerrors(struct sas_phy *phy) +_transport_get_linkerrors(struct sas_phy *phy) { struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); struct _sas_phy *mpt2sas_phy; @@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy) } /** - * transport_get_enclosure_identifier - + * _transport_get_enclosure_identifier - * @phy: The sas phy object * * Obtain the enclosure logical id for an expander. * Returns 0 for success, non-zero for failure. */ static int -transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); struct _sas_node *sas_expander; @@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) } /** - * transport_get_bay_identifier - + * _transport_get_bay_identifier - * @phy: The sas phy object * * Returns the slot id for a device that resides inside an enclosure. */ static int -transport_get_bay_identifier(struct sas_rphy *rphy) +_transport_get_bay_identifier(struct sas_rphy *rphy) { struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); struct _sas_device *sas_device; @@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy) } /** - * transport_phy_reset - + * _transport_phy_reset - * @phy: The sas phy object * @hard_reset: * @@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy) * Returns 0 for success, non-zero for failure. 
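Aside: all of the renamed _transport_* handlers end up in mpt2sas_transport_functions (the template at the end of this file), which must be attached to the SAS transport class and hooked into each host. A sketch of the usual wiring, assuming attach happens once at module init, as mpt2sas effectively does in scsih_init()/scsih_probe():

#include <linux/errno.h>
#include <linux/init.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_sas.h>

extern struct sas_function_template mpt2sas_transport_functions;

static struct scsi_transport_template *example_tt;

/* Attach the function template once at module load. */
static int __init example_attach(void)
{
	example_tt = sas_attach_transport(&mpt2sas_transport_functions);
	return example_tt ? 0 : -ENODEV;
}

/* Each host must point at the template before scsi_add_host(). */
static void example_wire_host(struct Scsi_Host *shost)
{
	shost->transportt = example_tt;
}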
*/ static int -transport_phy_reset(struct sas_phy *phy, int hard_reset) +_transport_phy_reset(struct sas_phy *phy, int hard_reset) { struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); struct _sas_phy *mpt2sas_phy; @@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset) } /** - * transport_smp_handler - transport portal for smp passthru + * _transport_smp_handler - transport portal for smp passthru * @shost: shost object * @rphy: sas transport rphy object * @req: @@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset) * smp_rep_general /sys/class/bsg/expander-5:0 */ static int -transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, +_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct request *req) { struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1200,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } struct sas_function_template mpt2sas_transport_functions = { - .get_linkerrors = transport_get_linkerrors, - .get_enclosure_identifier = transport_get_enclosure_identifier, - .get_bay_identifier = transport_get_bay_identifier, - .phy_reset = transport_phy_reset, - .smp_handler = transport_smp_handler, + .get_linkerrors = _transport_get_linkerrors, + .get_enclosure_identifier = _transport_get_enclosure_identifier, + .get_bay_identifier = _transport_get_bay_identifier, + .phy_reset = _transport_phy_reset, + .smp_handler = _transport_smp_handler, }; struct scsi_transport_template *mpt2sas_transport_template; diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c deleted file mode 100644 index e4acebd10d1b..000000000000 --- a/drivers/scsi/mvsas.c +++ /dev/null @@ -1,3222 +0,0 @@ -/* - mvsas.c - Marvell 88SE6440 SAS/SATA support - - Copyright 2007 Red Hat, Inc. - Copyright 2008 Marvell. <kewei@marvell.com> - - This program is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License as - published by the Free Software Foundation; either version 2, - or (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - See the GNU General Public License for more details. - - You should have received a copy of the GNU General Public - License along with this program; see the file COPYING. If not, - write to the Free Software Foundation, 675 Mass Ave, Cambridge, - MA 02139, USA. - - --------------------------------------------------------------- - - Random notes: - * hardware supports controlling the endian-ness of data - structures. this permits elimination of all the le32_to_cpu() - and cpu_to_le32() conversions. 
- - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/interrupt.h> -#include <linux/spinlock.h> -#include <linux/delay.h> -#include <linux/dma-mapping.h> -#include <linux/ctype.h> -#include <scsi/libsas.h> -#include <scsi/scsi_tcq.h> -#include <scsi/sas_ata.h> -#include <asm/io.h> - -#define DRV_NAME "mvsas" -#define DRV_VERSION "0.5.2" -#define _MV_DUMP 0 -#define MVS_DISABLE_NVRAM -#define MVS_DISABLE_MSI - -#define mr32(reg) readl(regs + MVS_##reg) -#define mw32(reg,val) writel((val), regs + MVS_##reg) -#define mw32_f(reg,val) do { \ - writel((val), regs + MVS_##reg); \ - readl(regs + MVS_##reg); \ - } while (0) - -#define MVS_ID_NOT_MAPPED 0x7f -#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) - -/* offset for D2H FIS in the Received FIS List Structure */ -#define SATA_RECEIVED_D2H_FIS(reg_set) \ - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) -#define SATA_RECEIVED_PIO_FIS(reg_set) \ - ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) -#define UNASSOC_D2H_FIS(id) \ - ((void *) mvi->rx_fis + 0x100 * id) - -#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ - for ((__mc) = (__lseq_mask), (__lseq) = 0; \ - (__mc) != 0 && __rest; \ - (++__lseq), (__mc) >>= 1) - -/* driver compile-time configuration */ -enum driver_configuration { - MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ - MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ - /* software requires power-of-2 - ring size */ - - MVS_SLOTS = 512, /* command slots */ - MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ - MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ - MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ - MVS_OAF_SZ = 64, /* Open address frame buffer size */ - - MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ - - MVS_QUEUE_SIZE = 30, /* Support Queue depth */ - MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ -}; - -/* unchangeable hardware details */ -enum hardware_details { - MVS_MAX_PHYS = 8, /* max. possible phys */ - MVS_MAX_PORTS = 8, /* max. 
possible ports */ - MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), -}; - -/* peripheral registers (BAR2) */ -enum peripheral_registers { - SPI_CTL = 0x10, /* EEPROM control */ - SPI_CMD = 0x14, /* EEPROM command */ - SPI_DATA = 0x18, /* EEPROM data */ -}; - -enum peripheral_register_bits { - TWSI_RDY = (1U << 7), /* EEPROM interface ready */ - TWSI_RD = (1U << 4), /* EEPROM read access */ - - SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ -}; - -/* enhanced mode registers (BAR4) */ -enum hw_registers { - MVS_GBL_CTL = 0x04, /* global control */ - MVS_GBL_INT_STAT = 0x08, /* global irq status */ - MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ - MVS_GBL_PORT_TYPE = 0xa0, /* port type */ - - MVS_CTL = 0x100, /* SAS/SATA port configuration */ - MVS_PCS = 0x104, /* SAS/SATA port control/status */ - MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ - MVS_CMD_LIST_HI = 0x10C, - MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ - MVS_RX_FIS_HI = 0x114, - - MVS_TX_CFG = 0x120, /* TX configuration */ - MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ - MVS_TX_HI = 0x128, - - MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ - MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ - MVS_RX_CFG = 0x134, /* RX configuration */ - MVS_RX_LO = 0x138, /* RX (completion) ring addr */ - MVS_RX_HI = 0x13C, - MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ - - MVS_INT_COAL = 0x148, /* Int coalescing config */ - MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ - MVS_INT_STAT = 0x150, /* Central int status */ - MVS_INT_MASK = 0x154, /* Central int enable */ - MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ - MVS_INT_MASK_SRS = 0x15C, - - /* ports 1-3 follow after this */ - MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ - MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ - MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ - MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ - - /* ports 1-3 follow after this */ - MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ - MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ - - MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ - MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ - - /* ports 1-3 follow after this */ - MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ - MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ - MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ - MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ - - /* ports 1-3 follow after this */ - MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ - MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ - MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ - MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ -}; - -enum hw_register_bits { - /* MVS_GBL_CTL */ - INT_EN = (1U << 1), /* Global int enable */ - HBA_RST = (1U << 0), /* HBA reset */ - - /* MVS_GBL_INT_STAT */ - INT_XOR = (1U << 4), /* XOR engine event */ - INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ - - /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ - SATA_TARGET = (1U << 16), /* port0 SATA target enable */ - MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ - MODE_AUTO_DET_PORT6 = (1U << 14), - MODE_AUTO_DET_PORT5 = (1U << 13), - MODE_AUTO_DET_PORT4 = (1U << 12), - MODE_AUTO_DET_PORT3 = (1U << 11), - MODE_AUTO_DET_PORT2 = (1U << 10), - MODE_AUTO_DET_PORT1 = (1U << 9), - MODE_AUTO_DET_PORT0 = (1U << 8), - MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | - MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | - MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 
| - MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, - MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ - MODE_SAS_PORT6_MASK = (1U << 6), - MODE_SAS_PORT5_MASK = (1U << 5), - MODE_SAS_PORT4_MASK = (1U << 4), - MODE_SAS_PORT3_MASK = (1U << 3), - MODE_SAS_PORT2_MASK = (1U << 2), - MODE_SAS_PORT1_MASK = (1U << 1), - MODE_SAS_PORT0_MASK = (1U << 0), - MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | - MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | - MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | - MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, - - /* SAS_MODE value may be - * dictated (in hw) by values - * of SATA_TARGET & AUTO_DET - */ - - /* MVS_TX_CFG */ - TX_EN = (1U << 16), /* Enable TX */ - TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ - - /* MVS_RX_CFG */ - RX_EN = (1U << 16), /* Enable RX */ - RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ - - /* MVS_INT_COAL */ - COAL_EN = (1U << 16), /* Enable int coalescing */ - - /* MVS_INT_STAT, MVS_INT_MASK */ - CINT_I2C = (1U << 31), /* I2C event */ - CINT_SW0 = (1U << 30), /* software event 0 */ - CINT_SW1 = (1U << 29), /* software event 1 */ - CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ - CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ - CINT_MEM = (1U << 26), /* int mem parity err */ - CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ - CINT_SRS = (1U << 3), /* SRS event */ - CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ - CINT_DONE = (1U << 0), /* cmd completion */ - - /* shl for ports 1-3 */ - CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ - CINT_PORT = (1U << 8), /* port0 event */ - CINT_PORT_MASK_OFFSET = 8, - CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), - - /* TX (delivery) ring bits */ - TXQ_CMD_SHIFT = 29, - TXQ_CMD_SSP = 1, /* SSP protocol */ - TXQ_CMD_SMP = 2, /* SMP protocol */ - TXQ_CMD_STP = 3, /* STP/SATA protocol */ - TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ - TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ - TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ - TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ - TXQ_SRS_SHIFT = 20, /* SATA register set */ - TXQ_SRS_MASK = 0x7f, - TXQ_PHY_SHIFT = 12, /* PHY bitmap */ - TXQ_PHY_MASK = 0xff, - TXQ_SLOT_MASK = 0xfff, /* slot number */ - - /* RX (completion) ring bits */ - RXQ_GOOD = (1U << 23), /* Response good */ - RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ - RXQ_CMD_RX = (1U << 20), /* target cmd received */ - RXQ_ATTN = (1U << 19), /* attention */ - RXQ_RSP = (1U << 18), /* response frame xfer'd */ - RXQ_ERR = (1U << 17), /* err info rec xfer'd */ - RXQ_DONE = (1U << 16), /* cmd complete */ - RXQ_SLOT_MASK = 0xfff, /* slot number */ - - /* mvs_cmd_hdr bits */ - MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ - MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ - - /* SSP initiator only */ - MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ - - /* SSP initiator or target */ - MCH_SSP_FR_TASK = 0x1, /* TASK frame */ - - /* SSP target only */ - MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ - MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ - MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ - MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ - - MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ - MCH_FBURST = (1U << 11), /* first burst (SSP) */ - MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ - MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ - MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ - MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ - MCH_FPDMA = (1U << 
6), /* First party DMA (STP/SATA) */ - MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ - MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ - MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ - - CCTL_RST = (1U << 5), /* port logic reset */ - - /* 0(LSB first), 1(MSB first) */ - CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ - CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ - CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ - CCTL_ENDIAN_CMD = (1U << 0), /* command table */ - - /* MVS_Px_SER_CTLSTAT (per-phy control) */ - PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ - PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ - PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ - PHY_RST = (1U << 0), /* phy reset */ - PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK = - (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), - PHY_READY_MASK = (1U << 20), - - /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ - PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ - PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ - PHYEV_AN = (1U << 18), /* SATA async notification */ - PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ - PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ - PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ - PHYEV_IU_BIG = (1U << 11), /* IU too long err */ - PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ - PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ - PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ - PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ - PHYEV_PORT_SEL = (1U << 6), /* port selector present */ - PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ - PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ - PHYEV_ID_FAIL = (1U << 3), /* identify failed */ - PHYEV_ID_DONE = (1U << 2), /* identify done */ - PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ - PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ - - /* MVS_PCS */ - PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ - PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ - PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ - PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ - PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ - PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ - PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ - PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ - PCS_CMD_RST = (1U << 1), /* reset cmd issue */ - PCS_CMD_EN = (1U << 0), /* enable cmd issue */ - - /* Port n Attached Device Info */ - PORT_DEV_SSP_TRGT = (1U << 19), - PORT_DEV_SMP_TRGT = (1U << 18), - PORT_DEV_STP_TRGT = (1U << 17), - PORT_DEV_SSP_INIT = (1U << 11), - PORT_DEV_SMP_INIT = (1U << 10), - PORT_DEV_STP_INIT = (1U << 9), - PORT_PHY_ID_MASK = (0xFFU << 24), - PORT_DEV_TRGT_MASK = (0x7U << 17), - PORT_DEV_INIT_MASK = (0x7U << 9), - PORT_DEV_TYPE_MASK = (0x7U << 0), - - /* Port n PHY Status */ - PHY_RDY = (1U << 2), - PHY_DW_SYNC = (1U << 1), - PHY_OOB_DTCTD = (1U << 0), - - /* VSR */ - /* PHYMODE 6 (CDB) */ - PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ - PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ - PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ - PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ - PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ - PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ - PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select 
(final) */ - PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ - PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ - PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ - PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ - PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ - PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ - PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ -}; - -enum mvs_info_flags { - MVF_MSI = (1U << 0), /* MSI is enabled */ - MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ -}; - -enum sas_cmd_port_registers { - CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ - CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ - CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ - CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ - CMD_OOB_SPACE = 0x110, /* OOB space control register */ - CMD_OOB_BURST = 0x114, /* OOB burst control register */ - CMD_PHY_TIMER = 0x118, /* PHY timer control register */ - CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ - CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ - CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ - CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ - CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ - CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ - CMD_ID_TEST = 0x134, /* ID test register */ - CMD_PL_TIMER = 0x138, /* PL timer register */ - CMD_WD_TIMER = 0x13c, /* WD timer register */ - CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ - CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ - CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ - CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ - CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ - CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ - CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ - CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ - CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ - CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */ - CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ - CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ - CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ - CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ - CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ - CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ - CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ - CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ - CMD_RESET_COUNT = 0x188, /* Reset Count */ - CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ - CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ - CMD_PHY_CTL = 0x194, /* PHY Control and Status */ - CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ - CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ - CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ - CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ - CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ - CMD_HOST_CTL = 0x1AC, /* Host Control Status */ - CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ - CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ - CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ - CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ - CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ - CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ -}; - -/* SAS/SATA configuration port registers, aka phy registers */ -enum 
sas_sata_config_port_regs { - PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ - PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ - PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ - PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ - PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ - PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ - PHYR_SATA_CTL = 0x18, /* SATA control */ - PHYR_PHY_STAT = 0x1C, /* PHY status */ - PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ - PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ - PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ - PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ - PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ - PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ - PHYR_WIDE_PORT = 0x38, /* wide port participating */ - PHYR_CURRENT0 = 0x80, /* current connection info 0 */ - PHYR_CURRENT1 = 0x84, /* current connection info 1 */ - PHYR_CURRENT2 = 0x88, /* current connection info 2 */ -}; - -/* SAS/SATA Vendor Specific Port Registers */ -enum sas_sata_vsp_regs { - VSR_PHY_STAT = 0x00, /* Phy Status */ - VSR_PHY_MODE1 = 0x01, /* phy tx */ - VSR_PHY_MODE2 = 0x02, /* tx scc */ - VSR_PHY_MODE3 = 0x03, /* pll */ - VSR_PHY_MODE4 = 0x04, /* VCO */ - VSR_PHY_MODE5 = 0x05, /* Rx */ - VSR_PHY_MODE6 = 0x06, /* CDR */ - VSR_PHY_MODE7 = 0x07, /* Impedance */ - VSR_PHY_MODE8 = 0x08, /* Voltage */ - VSR_PHY_MODE9 = 0x09, /* Test */ - VSR_PHY_MODE10 = 0x0A, /* Power */ - VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ - VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ - VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */ -}; - -enum pci_cfg_registers { - PCR_PHY_CTL = 0x40, - PCR_PHY_CTL2 = 0x90, - PCR_DEV_CTRL = 0xE8, -}; - -enum pci_cfg_register_bits { - PCTL_PWR_ON = (0xFU << 24), - PCTL_OFF = (0xFU << 12), - PRD_REQ_SIZE = (0x4000), - PRD_REQ_MASK = (0x00007000), -}; - -enum nvram_layout_offsets { - NVR_SIG = 0x00, /* 0xAA, 0x55 */ - NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ -}; - -enum chip_flavors { - chip_6320, - chip_6440, - chip_6480, -}; - -enum port_type { - PORT_TYPE_SAS = (1L << 1), - PORT_TYPE_SATA = (1L << 0), -}; - -/* Command Table Format */ -enum ct_format { - /* SSP */ - SSP_F_H = 0x00, - SSP_F_IU = 0x18, - SSP_F_MAX = 0x4D, - /* STP */ - STP_CMD_FIS = 0x00, - STP_ATAPI_CMD = 0x40, - STP_F_MAX = 0x10, - /* SMP */ - SMP_F_T = 0x00, - SMP_F_DEP = 0x01, - SMP_F_MAX = 0x101, -}; - -enum status_buffer { - SB_EIR_OFF = 0x00, /* Error Information Record */ - SB_RFB_OFF = 0x08, /* Response Frame Buffer */ - SB_RFB_MAX = 0x400, /* RFB size*/ -}; - -enum error_info_rec { - CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ - CMD_PI_ERR = (1U << 30), /* Protection info error. 
see flags2 */ - RSP_OVER = (1U << 29), /* rsp buffer overflow */ - RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ - UNK_FIS = (1U << 27), /* unknown FIS */ - DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ - SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ - TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ - R_ERR = (1U << 23), /* SATA returned R_ERR prim */ - RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ - XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ - UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ - DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ - INTERLOCK = (1U << 15), /* interlock error */ - NAK = (1U << 14), /* NAK rx'd */ - ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ - CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ - OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ - PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ - NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ - STP_RES_BSY = (1U << 8), /* STP resources busy */ - BREAK = (1U << 7), /* break received */ - BAD_DEST = (1U << 6), /* bad destination */ - BAD_PROTO = (1U << 5), /* protocol not supported */ - BAD_RATE = (1U << 4), /* cxn rate not supported */ - WRONG_DEST = (1U << 3), /* wrong destination error */ - CREDIT_TO = (1U << 2), /* credit timeout */ - WDOG_TO = (1U << 1), /* watchdog timeout */ - BUF_PAR = (1U << 0), /* buffer parity error */ -}; - -enum error_info_rec_2 { - SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ - GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ - APP_CHK_ERR = (1U << 13), /* Application Check error */ - REF_CHK_ERR = (1U << 12), /* Reference Check Error */ - USR_BLK_NM = (1U << 0), /* User Block Number */ -}; - -struct mvs_chip_info { - u32 n_phy; - u32 srs_sz; - u32 slot_width; -}; - -struct mvs_err_info { - __le32 flags; - __le32 flags2; -}; - -struct mvs_prd { - __le64 addr; /* 64-bit buffer address */ - __le32 reserved; - __le32 len; /* 16-bit length */ -}; - -struct mvs_cmd_hdr { - __le32 flags; /* PRD tbl len; SAS, SATA ctl */ - __le32 lens; /* cmd, max resp frame len */ - __le32 tags; /* targ port xfer tag; tag */ - __le32 data_len; /* data xfer len */ - __le64 cmd_tbl; /* command table address */ - __le64 open_frame; /* open addr frame address */ - __le64 status_buf; /* status buffer address */ - __le64 prd_tbl; /* PRD tbl address */ - __le32 reserved[4]; -}; - -struct mvs_port { - struct asd_sas_port sas_port; - u8 port_attached; - u8 taskfileset; - u8 wide_port_phymap; - struct list_head list; -}; - -struct mvs_phy { - struct mvs_port *port; - struct asd_sas_phy sas_phy; - struct sas_identify identify; - struct scsi_device *sdev; - u64 dev_sas_addr; - u64 att_dev_sas_addr; - u32 att_dev_info; - u32 dev_info; - u32 phy_type; - u32 phy_status; - u32 irq_status; - u32 frame_rcvd_size; - u8 frame_rcvd[32]; - u8 phy_attached; - enum sas_linkrate minimum_linkrate; - enum sas_linkrate maximum_linkrate; -}; - -struct mvs_slot_info { - struct list_head list; - struct sas_task *task; - u32 n_elem; - u32 tx; - - /* DMA buffer for storing cmd tbl, open addr frame, status buffer, - * and PRD table - */ - void *buf; - dma_addr_t buf_dma; -#if _MV_DUMP - u32 cmd_size; -#endif - - void *response; - struct mvs_port *port; -}; - -struct mvs_info { - unsigned long flags; - - spinlock_t lock; /* host-wide lock */ - struct pci_dev *pdev; /* our device */ - void __iomem *regs; /* enhanced mode registers */ - void __iomem *peri_regs; /* peripheral registers */ - - u8 
sas_addr[SAS_ADDR_SIZE]; - struct sas_ha_struct sas; /* SCSI/SAS glue */ - struct Scsi_Host *shost; - - __le32 *tx; /* TX (delivery) DMA ring */ - dma_addr_t tx_dma; - u32 tx_prod; /* cached next-producer idx */ - - __le32 *rx; /* RX (completion) DMA ring */ - dma_addr_t rx_dma; - u32 rx_cons; /* RX consumer idx */ - - __le32 *rx_fis; /* RX'd FIS area */ - dma_addr_t rx_fis_dma; - - struct mvs_cmd_hdr *slot; /* DMA command header slots */ - dma_addr_t slot_dma; - - const struct mvs_chip_info *chip; - - u8 tags[MVS_SLOTS]; - struct mvs_slot_info slot_info[MVS_SLOTS]; - /* further per-slot information */ - struct mvs_phy phy[MVS_MAX_PHYS]; - struct mvs_port port[MVS_MAX_PHYS]; -#ifdef MVS_USE_TASKLET - struct tasklet_struct tasklet; -#endif -}; - -static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, - void *funcdata); -static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port); -static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val); -static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port); -static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val); -static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val); -static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port); - -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); -static void mvs_detect_porttype(struct mvs_info *mvi, int i); -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); -static void mvs_release_task(struct mvs_info *mvi, int phy_no); - -static int mvs_scan_finished(struct Scsi_Host *, unsigned long); -static void mvs_scan_start(struct Scsi_Host *); -static int mvs_slave_configure(struct scsi_device *sdev); - -static struct scsi_transport_template *mvs_stt; - -static const struct mvs_chip_info mvs_chips[] = { - [chip_6320] = { 2, 16, 9 }, - [chip_6440] = { 4, 16, 9 }, - [chip_6480] = { 8, 32, 10 }, -}; - -static struct scsi_host_template mvs_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .queuecommand = sas_queuecommand, - .target_alloc = sas_target_alloc, - .slave_configure = mvs_slave_configure, - .slave_destroy = sas_slave_destroy, - .scan_finished = mvs_scan_finished, - .scan_start = mvs_scan_start, - .change_queue_depth = sas_change_queue_depth, - .change_queue_type = sas_change_queue_type, - .bios_param = sas_bios_param, - .can_queue = 1, - .cmd_per_lun = 1, - .this_id = -1, - .sg_tablesize = SG_ALL, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .use_clustering = ENABLE_CLUSTERING, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, - .slave_alloc = sas_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -}; - -static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) -{ - u32 i; - u32 run; - u32 offset; - - offset = 0; - while (size) { - printk("%08X : ", baseaddr + offset); - if (size >= 16) - run = 16; - else - run = size; - size -= run; - for (i = 0; i < 16; i++) { - if (i < run) - printk("%02X ", (u32)data[i]); - else - printk(" "); - } - printk(": "); - for (i = 0; i < run; i++) - printk("%c", isalnum(data[i]) ? 
data[i] : '.'); - printk("\n"); - data = &data[16]; - offset += run; - } - printk("\n"); -} - -#if _MV_DUMP -static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, - enum sas_protocol proto) -{ - u32 offset; - struct pci_dev *pdev = mvi->pdev; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - - offset = slot->cmd_size + MVS_OAF_SZ + - sizeof(struct mvs_prd) * slot->n_elem; - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", - tag); - mvs_hexdump(32, (u8 *) slot->response, - (u32) slot->buf_dma + offset); -} -#endif - -static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, - enum sas_protocol proto) -{ -#if _MV_DUMP - u32 sz, w_ptr; - u64 addr; - void __iomem *regs = mvi->regs; - struct pci_dev *pdev = mvi->pdev; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - - /*Delivery Queue */ - sz = mr32(TX_CFG) & TX_RING_SZ_MASK; - w_ptr = slot->tx; - addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO); - dev_printk(KERN_DEBUG, &pdev->dev, - "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); - dev_printk(KERN_DEBUG, &pdev->dev, - "Delivery Queue Base Address=0x%llX (PA)" - "(tx_dma=0x%llX), Entry=%04d\n", - addr, mvi->tx_dma, w_ptr); - mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), - (u32) mvi->tx_dma + sizeof(u32) * w_ptr); - /*Command List */ - addr = mvi->slot_dma; - dev_printk(KERN_DEBUG, &pdev->dev, - "Command List Base Address=0x%llX (PA)" - "(slot_dma=0x%llX), Header=%03d\n", - addr, slot->buf_dma, tag); - dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); - /*mvs_cmd_hdr */ - mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), - (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); - /*1.command table area */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); - mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); - /*2.open address frame area */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); - mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, - (u32) slot->buf_dma + slot->cmd_size); - /*3.status buffer */ - mvs_hba_sb_dump(mvi, tag, proto); - /*4.PRD table */ - dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); - mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, - (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, - (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); -#endif -} - -static void mvs_hba_cq_dump(struct mvs_info *mvi) -{ -#if (_MV_DUMP > 2) - u64 addr; - void __iomem *regs = mvi->regs; - struct pci_dev *pdev = mvi->pdev; - u32 entry = mvi->rx_cons + 1; - u32 rx_desc = le32_to_cpu(mvi->rx[entry]); - - /*Completion Queue */ - addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); - dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", - mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); - dev_printk(KERN_DEBUG, &pdev->dev, - "Completion List Base Address=0x%llX (PA), " - "CQ_Entry=%04d, CQ_WP=0x%08X\n", - addr, entry - 1, mvi->rx[0]); - mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), - mvi->rx_dma + sizeof(u32) * entry); -#endif -} - -static void mvs_hba_interrupt_enable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(GBL_CTL); - - mw32(GBL_CTL, tmp | INT_EN); -} - -static void mvs_hba_interrupt_disable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(GBL_CTL); - - mw32(GBL_CTL, tmp & ~INT_EN); -} - -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); - -/* move to PCI layer or libata core? 
*/ -static int pci_go_64(struct pci_dev *pdev) -{ - int rc; - - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (rc) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "64-bit DMA enable failed\n"); - return rc; - } - } - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit DMA enable failed\n"); - return rc; - } - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit consistent DMA enable failed\n"); - return rc; - } - } - - return rc; -} - -static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) -{ - if (task->lldd_task) { - struct mvs_slot_info *slot; - slot = (struct mvs_slot_info *) task->lldd_task; - *tag = slot - mvi->slot_info; - return 1; - } - return 0; -} - -static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) -{ - void *bitmap = (void *) &mvi->tags; - clear_bit(tag, bitmap); -} - -static void mvs_tag_free(struct mvs_info *mvi, u32 tag) -{ - mvs_tag_clear(mvi, tag); -} - -static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) -{ - void *bitmap = (void *) &mvi->tags; - set_bit(tag, bitmap); -} - -static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) -{ - unsigned int index, tag; - void *bitmap = (void *) &mvi->tags; - - index = find_first_zero_bit(bitmap, MVS_SLOTS); - tag = index; - if (tag >= MVS_SLOTS) - return -SAS_QUEUE_FULL; - mvs_tag_set(mvi, tag); - *tag_out = tag; - return 0; -} - -static void mvs_tag_init(struct mvs_info *mvi) -{ - int i; - for (i = 0; i < MVS_SLOTS; ++i) - mvs_tag_clear(mvi, i); -} - -#ifndef MVS_DISABLE_NVRAM -static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) -{ - int timeout = 1000; - - if (addr & ~SPI_ADDR_MASK) - return -EINVAL; - - writel(addr, regs + SPI_CMD); - writel(TWSI_RD, regs + SPI_CTL); - - while (timeout-- > 0) { - if (readl(regs + SPI_CTL) & TWSI_RDY) { - *data = readl(regs + SPI_DATA); - return 0; - } - - udelay(10); - } - - return -EBUSY; -} - -static int mvs_eep_read_buf(void __iomem *regs, u32 addr, - void *buf, u32 buflen) -{ - u32 addr_end, tmp_addr, i, j; - u32 tmp = 0; - int rc; - u8 *tmp8, *buf8 = buf; - - addr_end = addr + buflen; - tmp_addr = ALIGN(addr, 4); - if (addr > 0xff) - return -EINVAL; - - j = addr & 0x3; - if (j) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - tmp8 = (u8 *)&tmp; - for (i = j; i < 4; i++) - *buf8++ = tmp8[i]; - - tmp_addr += 4; - } - - for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - memcpy(buf8, &tmp, 4); - buf8 += 4; - } - - if (tmp_addr < addr_end) { - rc = mvs_eep_read(regs, tmp_addr, &tmp); - if (rc) - return rc; - - tmp8 = (u8 *)&tmp; - j = addr_end - tmp_addr; - for (i = 0; i < j; i++) - *buf8++ = tmp8[i]; - - tmp_addr += 4; - } - - return 0; -} -#endif - -static int mvs_nvram_read(struct mvs_info *mvi, u32 addr, - void *buf, u32 buflen) -{ -#ifndef MVS_DISABLE_NVRAM - void __iomem *regs = mvi->regs; - int rc, i; - u32 sum; - u8 hdr[2], *tmp; - const char *msg; - - rc = mvs_eep_read_buf(regs, addr, &hdr, 2); - if (rc) { - msg = "nvram hdr read failed"; - goto err_out; - } - rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); - if (rc) { - msg = "nvram read failed"; - goto err_out; - } - - if (hdr[0] != 0x5A) { - /* entry id */ - msg = "invalid nvram entry id"; - rc = -ENOENT; - goto 
err_out; - } - - tmp = buf; - sum = ((u32)hdr[0]) + ((u32)hdr[1]); - for (i = 0; i < buflen; i++) - sum += ((u32)tmp[i]); - - if (sum) { - msg = "nvram checksum failure"; - rc = -EILSEQ; - goto err_out; - } - - return 0; - -err_out: - dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg); - return rc; -#else - /* FIXME , For SAS target mode */ - memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); - return 0; -#endif -} - -static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - - if (!phy->phy_attached) - return; - - if (sas_phy->phy) { - struct sas_phy *sphy = sas_phy->phy; - - sphy->negotiated_linkrate = sas_phy->linkrate; - sphy->minimum_linkrate = phy->minimum_linkrate; - sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - sphy->maximum_linkrate = phy->maximum_linkrate; - sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; - } - - if (phy->phy_type & PORT_TYPE_SAS) { - struct sas_identify_frame *id; - - id = (struct sas_identify_frame *)phy->frame_rcvd; - id->dev_type = phy->identify.device_type; - id->initiator_bits = SAS_PROTOCOL_ALL; - id->target_bits = phy->identify.target_port_protocols; - } else if (phy->phy_type & PORT_TYPE_SATA) { - /* TODO */ - } - mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; - mvi->sas.notify_port_event(mvi->sas.sas_phy[i], - PORTE_BYTES_DMAED); -} - -static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) -{ - /* give the phy enabling interrupt event time to come in (1s - * is empirically about all it takes) */ - if (time < HZ) - return 0; - /* Wait for discovery to finish */ - scsi_flush_work(shost); - return 1; -} - -static void mvs_scan_start(struct Scsi_Host *shost) -{ - int i; - struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; - - for (i = 0; i < mvi->chip->n_phy; ++i) { - mvs_bytes_dmaed(mvi, i); - } -} - -static int mvs_slave_configure(struct scsi_device *sdev) -{ - struct domain_device *dev = sdev_to_domain_dev(sdev); - int ret = sas_slave_configure(sdev); - - if (ret) - return ret; - - if (dev_is_sata(dev)) { - /* struct ata_port *ap = dev->sata_dev.ap; */ - /* struct ata_device *adev = ap->link.device; */ - - /* clamp at no NCQ for the time being */ - /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); - } - return 0; -} - -static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) -{ - struct pci_dev *pdev = mvi->pdev; - struct sas_ha_struct *sas_ha = &mvi->sas; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); - /* - * events is port event now , - * we need check the interrupt status which belongs to per port. 
- */ - dev_printk(KERN_DEBUG, &pdev->dev, - "Port %d Event = %X\n", - phy_no, phy->irq_status); - - if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { - mvs_release_task(mvi, phy_no); - if (!mvs_is_phy_ready(mvi, phy_no)) { - sas_phy_disconnected(sas_phy); - sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); - dev_printk(KERN_INFO, &pdev->dev, - "Port %d Unplug Notice\n", phy_no); - - } else - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); - } - if (!(phy->irq_status & PHYEV_DEC_ERR)) { - if (phy->irq_status & PHYEV_COMWAKE) { - u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); - mvs_write_port_irq_mask(mvi, phy_no, - tmp | PHYEV_SIG_FIS); - } - if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { - phy->phy_status = mvs_is_phy_ready(mvi, phy_no); - if (phy->phy_status) { - mvs_detect_porttype(mvi, phy_no); - - if (phy->phy_type & PORT_TYPE_SATA) { - u32 tmp = mvs_read_port_irq_mask(mvi, - phy_no); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_mask(mvi, - phy_no, tmp); - } - - mvs_update_phyinfo(mvi, phy_no, 0); - sas_ha->notify_phy_event(sas_phy, - PHYE_OOB_DONE); - mvs_bytes_dmaed(mvi, phy_no); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "plugin interrupt but phy is gone\n"); - mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, - NULL); - } - } else if (phy->irq_status & PHYEV_BROAD_CH) { - mvs_release_task(mvi, phy_no); - sas_ha->notify_port_event(sas_phy, - PORTE_BROADCAST_RCVD); - } - } - mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); -} - -static void mvs_int_sata(struct mvs_info *mvi) -{ - u32 tmp; - void __iomem *regs = mvi->regs; - tmp = mr32(INT_STAT_SRS); - mw32(INT_STAT_SRS, tmp & 0xFFFF); -} - -static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - void __iomem *regs = mvi->regs; - struct domain_device *dev = task->dev; - struct asd_sas_port *sas_port = dev->port; - struct mvs_port *port = mvi->slot_info[slot_idx].port; - u32 reg_set, phy_mask; - - if (!sas_protocol_ata(task->task_proto)) { - reg_set = 0; - phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : - sas_port->phy_mask; - } else { - reg_set = port->taskfileset; - phy_mask = sas_port->phy_mask; - } - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | - (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | - (phy_mask << TXQ_PHY_SHIFT) | - (reg_set << TXQ_SRS_SHIFT)); - - mw32(TX_PROD_IDX, mvi->tx_prod); - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); -} - -static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx, int err) -{ - struct mvs_port *port = mvi->slot_info[slot_idx].port; - struct task_status_struct *tstat = &task->task_status; - struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; - int stat = SAM_GOOD; - - resp->frame_len = sizeof(struct dev_to_host_fis); - memcpy(&resp->ending_fis[0], - SATA_RECEIVED_D2H_FIS(port->taskfileset), - sizeof(struct dev_to_host_fis)); - tstat->buf_valid_size = sizeof(*resp); - if (unlikely(err)) - stat = SAS_PROTO_RESPONSE; - return stat; -} - -static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) -{ - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - mvs_tag_clear(mvi, slot_idx); -} - -static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, - struct mvs_slot_info *slot, u32 slot_idx) -{ - if (!sas_protocol_ata(task->task_proto)) - if (slot->n_elem) - pci_unmap_sg(mvi->pdev, task->scatter, - slot->n_elem, task->data_dir); - - switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, - PCI_DMA_FROMDEVICE); - pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); - break; - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SSP: - default: - /* do nothing */ - break; - } - list_del(&slot->list); - task->lldd_task = NULL; - slot->task = NULL; - slot->port = NULL; -} - -static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; - u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); - u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); - int stat = SAM_CHECK_COND; - - if (err_dw1 & SLOT_BSY_ERR) { - stat = SAS_QUEUE_FULL; - mvs_slot_reset(mvi, task, slot_idx); - } - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - break; - case SAS_PROTOCOL_SMP: - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - if (err_dw0 & TFILE_ERR) - stat = mvs_sata_done(mvi, task, slot_idx, 1); - break; - default: - break; - } - - mvs_hexdump(16, (u8 *) slot->response, 0); - return stat; -} - -static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) -{ - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; - struct sas_task *task = slot->task; - struct task_status_struct *tstat; - struct mvs_port *port; - bool aborted; - void *to; - - if (unlikely(!task || !task->lldd_task)) - return -1; - - mvs_hba_cq_dump(mvi); - - spin_lock(&task->task_state_lock); - aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; - if (!aborted) { - task->task_state_flags &= - ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); - task->task_state_flags |= SAS_TASK_STATE_DONE; - } - spin_unlock(&task->task_state_lock); - - if (aborted) { - mvs_slot_task_free(mvi, task, slot, slot_idx); - mvs_slot_free(mvi, rx_desc); - return -1; - } - - port = slot->port; - tstat = &task->task_status; - memset(tstat, 0, sizeof(*tstat)); - tstat->resp = SAS_TASK_COMPLETE; - - if (unlikely(!port->port_attached || flags)) { - 
mvs_slot_err(mvi, task, slot_idx); - if (!sas_protocol_ata(task->task_proto)) - tstat->stat = SAS_PHY_DOWN; - goto out; - } - - /* error info record present */ - if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { - tstat->stat = mvs_slot_err(mvi, task, slot_idx); - goto out; - } - - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - /* hw says status == 0, datapres == 0 */ - if (rx_desc & RXQ_GOOD) { - tstat->stat = SAM_GOOD; - tstat->resp = SAS_TASK_COMPLETE; - } - /* response frame present */ - else if (rx_desc & RXQ_RSP) { - struct ssp_response_iu *iu = - slot->response + sizeof(struct mvs_err_info); - sas_ssp_task_response(&mvi->pdev->dev, task, iu); - } - - /* should never happen? */ - else - tstat->stat = SAM_CHECK_COND; - break; - - case SAS_PROTOCOL_SMP: { - struct scatterlist *sg_resp = &task->smp_task.smp_resp; - tstat->stat = SAM_GOOD; - to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); - memcpy(to + sg_resp->offset, - slot->response + sizeof(struct mvs_err_info), - sg_dma_len(sg_resp)); - kunmap_atomic(to, KM_IRQ0); - break; - } - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { - tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); - break; - } - - default: - tstat->stat = SAM_CHECK_COND; - break; - } - -out: - mvs_slot_task_free(mvi, task, slot, slot_idx); - if (unlikely(tstat->stat != SAS_QUEUE_FULL)) - mvs_slot_free(mvi, rx_desc); - - spin_unlock(&mvi->lock); - task->task_done(task); - spin_lock(&mvi->lock); - return tstat->stat; -} - -static void mvs_release_task(struct mvs_info *mvi, int phy_no) -{ - struct list_head *pos, *n; - struct mvs_slot_info *slot; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct mvs_port *port = phy->port; - u32 rx_desc; - - if (!port) - return; - - list_for_each_safe(pos, n, &port->list) { - slot = container_of(pos, struct mvs_slot_info, list); - rx_desc = (u32) (slot - mvi->slot_info); - mvs_slot_complete(mvi, rx_desc, 1); - } -} - -static void mvs_int_full(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp, stat; - int i; - - stat = mr32(INT_STAT); - - mvs_int_rx(mvi, false); - - for (i = 0; i < MVS_MAX_PORTS; i++) { - tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); - if (tmp) - mvs_int_port(mvi, i, tmp); - } - - if (stat & CINT_SRS) - mvs_int_sata(mvi); - - mw32(INT_STAT, stat); -} - -static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) -{ - void __iomem *regs = mvi->regs; - u32 rx_prod_idx, rx_desc; - bool attn = false; - struct pci_dev *pdev = mvi->pdev; - - /* the first dword in the RX ring is special: it contains - * a mirror of the hardware's RX producer index, so that - * we don't have to stall the CPU reading that register. - * The actual RX ring is offset by one dword, due to this. 
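Aside: the comment above, split across this hunk, documents the one trick in the RX path: dword 0 of the RX ring is a shadow of the hardware's producer index, so the interrupt handler can poll coherent memory instead of an MMIO register, falling back to MVS_RX_CONS_IDX only when the shadow looks stale. Distilled into a small predicate, reusing struct mvs_info and the register/mask names from the code being deleted (the 0xfff "never touched" sentinel mirrors the check just below):

#include <linux/io.h>
#include <linux/types.h>

static bool rx_has_work(struct mvs_info *mvi, void __iomem *regs)
{
	u32 prod = le32_to_cpu(mvi->rx[0]);	/* shadow producer index */

	if (prod == 0xfff)			/* h/w never wrote the ring */
		return false;
	if (prod == mvi->rx_cons)		/* shadow may lag: reread */
		prod = readl(regs + MVS_RX_CONS_IDX) & RX_RING_SZ_MASK;
	return prod != mvi->rx_cons;
}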
- */ - rx_prod_idx = mvi->rx_cons; - mvi->rx_cons = le32_to_cpu(mvi->rx[0]); - if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ - return 0; - - /* The CMPL_Q may come late, read from register and try again - * note: if coalescing is enabled, - * it will need to read from register every time for sure - */ - if (mvi->rx_cons == rx_prod_idx) - mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; - - if (mvi->rx_cons == rx_prod_idx) - return 0; - - while (mvi->rx_cons != rx_prod_idx) { - - /* increment our internal RX consumer pointer */ - rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); - - rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); - - if (likely(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - if (rx_desc & RXQ_ATTN) { - attn = true; - dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", - rx_desc); - } else if (rx_desc & RXQ_ERR) { - if (!(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", - rx_desc); - } else if (rx_desc & RXQ_SLOT_RESET) { - dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", - rx_desc); - mvs_slot_free(mvi, rx_desc); - } - } - - if (attn && self_clear) - mvs_int_full(mvi); - - return 0; -} - -#ifdef MVS_USE_TASKLET -static void mvs_tasklet(unsigned long data) -{ - struct mvs_info *mvi = (struct mvs_info *) data; - unsigned long flags; - - spin_lock_irqsave(&mvi->lock, flags); - -#ifdef MVS_DISABLE_MSI - mvs_int_full(mvi); -#else - mvs_int_rx(mvi, true); -#endif - spin_unlock_irqrestore(&mvi->lock, flags); -} -#endif - -static irqreturn_t mvs_interrupt(int irq, void *opaque) -{ - struct mvs_info *mvi = opaque; - void __iomem *regs = mvi->regs; - u32 stat; - - stat = mr32(GBL_INT_STAT); - - if (stat == 0 || stat == 0xffffffff) - return IRQ_NONE; - - /* clear CMD_CMPLT ASAP */ - mw32_f(INT_STAT, CINT_DONE); - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - - mvs_int_full(mvi); - - spin_unlock(&mvi->lock); -#else - tasklet_schedule(&mvi->tasklet); -#endif - return IRQ_HANDLED; -} - -#ifndef MVS_DISABLE_MSI -static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) -{ - struct mvs_info *mvi = opaque; - -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - - mvs_int_rx(mvi, true); - - spin_unlock(&mvi->lock); -#else - tasklet_schedule(&mvi->tasklet); -#endif - return IRQ_HANDLED; -} -#endif - -struct mvs_task_exec_info { - struct sas_task *task; - struct mvs_cmd_hdr *hdr; - struct mvs_port *port; - u32 tag; - int n_elem; -}; - -static int mvs_task_prep_smp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - int elem, rc, i; - struct sas_task *task = tei->task; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct scatterlist *sg_req, *sg_resp; - u32 req_len, resp_len, tag = tei->tag; - void *buf_tmp; - u8 *buf_oaf; - dma_addr_t buf_tmp_dma; - struct mvs_prd *buf_prd; - struct scatterlist *sg; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - struct asd_sas_port *sas_port = task->dev->port; - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#if _MV_DUMP - u8 *buf_cmd; - void *from; -#endif - /* - * DMA-map SMP request, response buffers - */ - sg_req = &task->smp_task.smp_req; - elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); - if (!elem) - return -ENOMEM; - req_len = sg_dma_len(sg_req); - - sg_resp = &task->smp_task.smp_resp; - elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); - if (!elem) { - rc = -ENOMEM; - goto err_out; - } - resp_len = sg_dma_len(sg_resp); - - /* must be in dwords */ - if ((req_len & 0x3) || (resp_len & 0x3)) { - rc = -EINVAL; 
- goto err_out_2; - } - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ - buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - -#if _MV_DUMP - buf_cmd = buf_tmp; - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - buf_tmp += req_len; - buf_tmp_dma += req_len; - slot->cmd_size = req_len; -#else - hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = sizeof(struct mvs_prd) * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - - /* - * Fill in TX ring and command slot header - */ - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | - TXQ_MODE_I | tag | - (sas_port->phy_mask << TXQ_PHY_SHIFT)); - - hdr->flags |= flags; - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); - hdr->tags = cpu_to_le32(tag); - hdr->data_len = 0; - - /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - -#if _MV_DUMP - /* copy cmd table */ - from = kmap_atomic(sg_page(sg_req), KM_IRQ0); - memcpy(buf_cmd, from + sg_req->offset, req_len); - kunmap_atomic(from, KM_IRQ0); -#endif - return 0; - -err_out_2: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, - PCI_DMA_FROMDEVICE); -err_out: - pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); - return rc; -} - -static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) -{ - void __iomem *regs = mvi->regs; - u32 tmp, offs; - u8 *tfs = &port->taskfileset; - - if (*tfs == MVS_ID_NOT_MAPPED) - return; - - offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); - if (*tfs < 16) { - tmp = mr32(PCS); - mw32(PCS, tmp & ~offs); - } else { - tmp = mr32(CTL); - mw32(CTL, tmp & ~offs); - } - - tmp = mr32(INT_STAT_SRS) & (1U << *tfs); - if (tmp) - mw32(INT_STAT_SRS, tmp); - - *tfs = MVS_ID_NOT_MAPPED; -} - -static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) -{ - int i; - u32 tmp, offs; - void __iomem *regs = mvi->regs; - - if (port->taskfileset != MVS_ID_NOT_MAPPED) - return 0; - - tmp = mr32(PCS); - - for (i = 0; i < mvi->chip->srs_sz; i++) { - if (i == 16) - tmp = mr32(CTL); - offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); - if (!(tmp & offs)) { - port->taskfileset = i; - - if (i < 16) - mw32(PCS, tmp | offs); - else - mw32(CTL, tmp | offs); - tmp = mr32(INT_STAT_SRS) & (1U << i); - if (tmp) - mw32(INT_STAT_SRS, tmp); - return 0; - } - } - return MVS_ID_NOT_MAPPED; -} - -static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) -{ - struct ata_queued_cmd *qc = 
task->uldd_task; - - if (qc) { - if (qc->tf.command == ATA_CMD_FPDMA_WRITE || - qc->tf.command == ATA_CMD_FPDMA_READ) { - *tag = qc->tag; - return 1; - } - } - - return 0; -} - -static int mvs_task_prep_ata(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - struct sas_task *task = tei->task; - struct domain_device *dev = task->dev; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct asd_sas_port *sas_port = dev->port; - struct mvs_slot_info *slot; - struct scatterlist *sg; - struct mvs_prd *buf_prd; - struct mvs_port *port = tei->port; - u32 tag = tei->tag; - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); - void *buf_tmp; - u8 *buf_cmd, *buf_oaf; - dma_addr_t buf_tmp_dma; - u32 i, req_len, resp_len; - const u32 max_resp_len = SB_RFB_MAX; - - if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) - return -EBUSY; - - slot = &mvi->slot_info[tag]; - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | - (TXQ_CMD_STP << TXQ_CMD_SHIFT) | - (sas_port->phy_mask << TXQ_PHY_SHIFT) | - (port->taskfileset << TXQ_SRS_SHIFT)); - - if (task->ata_task.use_ncq) - flags |= MCH_FPDMA; - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { - if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) - flags |= MCH_ATAPI; - } - - /* FIXME: fill in port multiplier number */ - - hdr->flags = cpu_to_le32(flags); - - /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ - if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags)) - task->ata_task.fis.sector_count |= hdr->tags << 3; - else - hdr->tags = cpu_to_le32(tag); - hdr->data_len = cpu_to_le32(task->total_xfer_len); - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ - buf_cmd = buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_ATA_CMD_SZ; - buf_tmp_dma += MVS_ATA_CMD_SZ; -#if _MV_DUMP - slot->cmd_size = MVS_ATA_CMD_SZ; -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - /* used for STP. unused for SATA? */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = sizeof(struct mvs_prd) * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - /* FIXME: probably unused, for SATA. 
kept here just in case - * we get a STP/SATA error information record - */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - - req_len = sizeof(struct host_to_dev_fis); - resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - - sizeof(struct mvs_err_info) - i; - - /* request, response lengths */ - resp_len = min(resp_len, max_resp_len); - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); - - task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ - /* fill in command FIS and ATAPI CDB */ - memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) - memcpy(buf_cmd + STP_ATAPI_CMD, - task->ata_task.atapi_packet, 16); - - /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - - return 0; -} - -static int mvs_task_prep_ssp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - struct sas_task *task = tei->task; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct mvs_port *port = tei->port; - struct mvs_slot_info *slot; - struct scatterlist *sg; - struct mvs_prd *buf_prd; - struct ssp_frame_hdr *ssp_hdr; - void *buf_tmp; - u8 *buf_cmd, *buf_oaf, fburst = 0; - dma_addr_t buf_tmp_dma; - u32 flags; - u32 resp_len, req_len, i, tag = tei->tag; - const u32 max_resp_len = SB_RFB_MAX; - u8 phy_mask; - - slot = &mvi->slot_info[tag]; - - phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : - task->dev->port->phy_mask; - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | - (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | - (phy_mask << TXQ_PHY_SHIFT)); - - flags = MCH_RETRY; - if (task->ssp_task.enable_first_burst) { - flags |= MCH_FBURST; - fburst = (1 << 7); - } - hdr->flags = cpu_to_le32(flags | - (tei->n_elem << MCH_PRD_LEN_SHIFT) | - (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); - - hdr->tags = cpu_to_le32(tag); - hdr->data_len = cpu_to_le32(task->total_xfer_len); - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ - buf_cmd = buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_SSP_CMD_SZ; - buf_tmp_dma += MVS_SSP_CMD_SZ; -#if _MV_DUMP - slot->cmd_size = MVS_SSP_CMD_SZ; -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = sizeof(struct mvs_prd) * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - - resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - - sizeof(struct mvs_err_info) - i; - resp_len = min(resp_len, max_resp_len); - - req_len = sizeof(struct ssp_frame_hdr) + 28; - - /* request, response lengths */ - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); - - /* generate open address frame hdr (first 12 bytes) */ - buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ - buf_oaf[1] = task->dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); - memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in SSP frame header (Command Table.SSP frame header) */ - ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; - ssp_hdr->frame_type = SSP_COMMAND; - memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, - HASHED_SAS_ADDR_SIZE); - memcpy(ssp_hdr->hashed_src_addr, - task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); - ssp_hdr->tag = cpu_to_be16(tag); - - /* fill in command frame IU */ - buf_cmd += sizeof(*ssp_hdr); - memcpy(buf_cmd, &task->ssp_task.LUN, 8); - buf_cmd[9] = fburst | task->ssp_task.task_attr | - (task->ssp_task.task_prio << 3); - memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); - - /* fill in PRD (scatter/gather) table, if any */ - for_each_sg(task->scatter, sg, tei->n_elem, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } - - return 0; -} - -static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) -{ - struct domain_device *dev = task->dev; - struct mvs_info *mvi = dev->port->ha->lldd_ha; - struct pci_dev *pdev = mvi->pdev; - void __iomem *regs = mvi->regs; - struct mvs_task_exec_info tei; - struct sas_task *t = task; - struct mvs_slot_info *slot; - u32 tag = 0xdeadbeef, rc, n_elem = 0; - unsigned long flags; - u32 n = num, pass = 0; - - spin_lock_irqsave(&mvi->lock, flags); - do { - dev = t->dev; - tei.port = &mvi->port[dev->port->id]; - - if (!tei.port->port_attached) { - if (sas_protocol_ata(t->task_proto)) { 
- rc = SAS_PHY_DOWN; - goto out_done; - } else { - struct task_status_struct *ts = &t->task_status; - ts->resp = SAS_TASK_UNDELIVERED; - ts->stat = SAS_PHY_DOWN; - t->task_done(t); - if (n > 1) - t = list_entry(t->list.next, - struct sas_task, list); - continue; - } - } - - if (!sas_protocol_ata(t->task_proto)) { - if (t->num_scatter) { - n_elem = pci_map_sg(mvi->pdev, t->scatter, - t->num_scatter, - t->data_dir); - if (!n_elem) { - rc = -ENOMEM; - goto err_out; - } - } - } else { - n_elem = t->num_scatter; - } - - rc = mvs_tag_alloc(mvi, &tag); - if (rc) - goto err_out; - - slot = &mvi->slot_info[tag]; - t->lldd_task = NULL; - slot->n_elem = n_elem; - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); - tei.task = t; - tei.hdr = &mvi->slot[tag]; - tei.tag = tag; - tei.n_elem = n_elem; - - switch (t->task_proto) { - case SAS_PROTOCOL_SMP: - rc = mvs_task_prep_smp(mvi, &tei); - break; - case SAS_PROTOCOL_SSP: - rc = mvs_task_prep_ssp(mvi, &tei); - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - rc = mvs_task_prep_ata(mvi, &tei); - break; - default: - dev_printk(KERN_ERR, &pdev->dev, - "unknown sas_task proto: 0x%x\n", - t->task_proto); - rc = -EINVAL; - break; - } - - if (rc) - goto err_out_tag; - - slot->task = t; - slot->port = tei.port; - t->lldd_task = (void *) slot; - list_add_tail(&slot->list, &slot->port->list); - /* TODO: select normal or high priority */ - - spin_lock(&t->task_state_lock); - t->task_state_flags |= SAS_TASK_AT_INITIATOR; - spin_unlock(&t->task_state_lock); - - mvs_hba_memory_dump(mvi, tag, t->task_proto); - - ++pass; - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); - if (n > 1) - t = list_entry(t->list.next, struct sas_task, list); - } while (--n); - - rc = 0; - goto out_done; - -err_out_tag: - mvs_tag_free(mvi, tag); -err_out: - dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); - if (!sas_protocol_ata(t->task_proto)) - if (n_elem) - pci_unmap_sg(mvi->pdev, t->scatter, n_elem, - t->data_dir); -out_done: - if (pass) - mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); - spin_unlock_irqrestore(&mvi->lock, flags); - return rc; -} - -static int mvs_task_abort(struct sas_task *task) -{ - int rc; - unsigned long flags; - struct mvs_info *mvi = task->dev->port->ha->lldd_ha; - struct pci_dev *pdev = mvi->pdev; - int tag; - - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_STATE_DONE) { - rc = TMF_RESP_FUNC_COMPLETE; - spin_unlock_irqrestore(&task->task_state_lock, flags); - goto out_done; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - - switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); - break; - case SAS_PROTOCOL_SSP: - dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ - dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! 
\n"); -#if _MV_DUMP - dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); - mvs_hexdump(sizeof(struct host_to_dev_fis), - (void *)&task->ata_task.fis, 0); - dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); - mvs_hexdump(16, task->ata_task.atapi_packet, 0); -#endif - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { - /* TODO */ - ; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - break; - } - default: - break; - } - - if (mvs_find_tag(mvi, task, &tag)) { - spin_lock_irqsave(&mvi->lock, flags); - mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); - spin_unlock_irqrestore(&mvi->lock, flags); - } - if (!mvs_task_exec(task, 1, GFP_ATOMIC)) - rc = TMF_RESP_FUNC_COMPLETE; - else - rc = TMF_RESP_FUNC_FAILED; -out_done: - return rc; -} - -static void mvs_free(struct mvs_info *mvi) -{ - int i; - - if (!mvi) - return; - - for (i = 0; i < MVS_SLOTS; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; - - if (slot->buf) - dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, - slot->buf, slot->buf_dma); - } - - if (mvi->tx) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, - mvi->tx, mvi->tx_dma); - if (mvi->rx_fis) - dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, - mvi->rx_fis, mvi->rx_fis_dma); - if (mvi->rx) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), - mvi->rx, mvi->rx_dma); - if (mvi->slot) - dma_free_coherent(&mvi->pdev->dev, - sizeof(*mvi->slot) * MVS_SLOTS, - mvi->slot, mvi->slot_dma); -#ifdef MVS_ENABLE_PERI - if (mvi->peri_regs) - iounmap(mvi->peri_regs); -#endif - if (mvi->regs) - iounmap(mvi->regs); - if (mvi->shost) - scsi_host_put(mvi->shost); - kfree(mvi->sas.sas_port); - kfree(mvi->sas.sas_phy); - kfree(mvi); -} - -/* FIXME: locking? */ -static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, - void *funcdata) -{ - struct mvs_info *mvi = sas_phy->ha->lldd_ha; - int rc = 0, phy_id = sas_phy->id; - u32 tmp; - - tmp = mvs_read_phy_ctl(mvi, phy_id); - - switch (func) { - case PHY_FUNC_SET_LINK_RATE:{ - struct sas_phy_linkrates *rates = funcdata; - u32 lrmin = 0, lrmax = 0; - - lrmin = (rates->minimum_linkrate << 8); - lrmax = (rates->maximum_linkrate << 12); - - if (lrmin) { - tmp &= ~(0xf << 8); - tmp |= lrmin; - } - if (lrmax) { - tmp &= ~(0xf << 12); - tmp |= lrmax; - } - mvs_write_phy_ctl(mvi, phy_id, tmp); - break; - } - - case PHY_FUNC_HARD_RESET: - if (tmp & PHY_RST_HARD) - break; - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); - break; - - case PHY_FUNC_LINK_RESET: - mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); - break; - - case PHY_FUNC_DISABLE: - case PHY_FUNC_RELEASE_SPINUP_HOLD: - default: - rc = -EOPNOTSUPP; - } - - return rc; -} - -static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) -{ - struct mvs_phy *phy = &mvi->phy[phy_id]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; - sas_phy->class = SAS; - sas_phy->iproto = SAS_PROTOCOL_ALL; - sas_phy->tproto = 0; - sas_phy->type = PHY_TYPE_PHYSICAL; - sas_phy->role = PHY_ROLE_INITIATOR; - sas_phy->oob_mode = OOB_NOT_CONNECTED; - sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; - - sas_phy->id = phy_id; - sas_phy->sas_addr = &mvi->sas_addr[0]; - sas_phy->frame_rcvd = &phy->frame_rcvd[0]; - sas_phy->ha = &mvi->sas; - sas_phy->lldd_phy = phy; -} - -static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct mvs_info *mvi; - unsigned long res_start, res_len, res_flag; - struct asd_sas_phy **arr_phy; - struct asd_sas_port **arr_port; - const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; - int i; - - /* - * alloc and init our per-HBA mvs_info struct - */ - - mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); - if (!mvi) - return NULL; - - spin_lock_init(&mvi->lock); -#ifdef MVS_USE_TASKLET - tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); -#endif - mvi->pdev = pdev; - mvi->chip = chip; - - if (pdev->device == 0x6440 && pdev->revision == 0) - mvi->flags |= MVF_PHY_PWR_FIX; - - /* - * alloc and init SCSI, SAS glue - */ - - mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); - if (!mvi->shost) - goto err_out; - - arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); - arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); - if (!arr_phy || !arr_port) - goto err_out; - - for (i = 0; i < MVS_MAX_PHYS; i++) { - mvs_phy_init(mvi, i); - arr_phy[i] = &mvi->phy[i].sas_phy; - arr_port[i] = &mvi->port[i].sas_port; - mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->port[i].wide_port_phymap = 0; - mvi->port[i].port_attached = 0; - INIT_LIST_HEAD(&mvi->port[i].list); - } - - SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; - mvi->shost->transportt = mvs_stt; - mvi->shost->max_id = 21; - mvi->shost->max_lun = ~0; - mvi->shost->max_channel = 0; - mvi->shost->max_cmd_len = 16; - - mvi->sas.sas_ha_name = DRV_NAME; - mvi->sas.dev = &pdev->dev; - mvi->sas.lldd_module = THIS_MODULE; - mvi->sas.sas_addr = &mvi->sas_addr[0]; - mvi->sas.sas_phy = arr_phy; - mvi->sas.sas_port = arr_port; - mvi->sas.num_phys = chip->n_phy; - mvi->sas.lldd_max_execute_num = 1; - mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; - mvi->shost->can_queue = MVS_CAN_QUEUE; - mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; - mvi->sas.lldd_ha = mvi; - mvi->sas.core.shost = mvi->shost; - - mvs_tag_init(mvi); - - /* - * ioremap main and peripheral registers - */ - -#ifdef MVS_ENABLE_PERI - res_start = pci_resource_start(pdev, 2); - res_len = pci_resource_len(pdev, 2); - if (!res_start || !res_len) - goto err_out; - - mvi->peri_regs = ioremap_nocache(res_start, res_len); - if (!mvi->peri_regs) - goto err_out; -#endif - - res_start = pci_resource_start(pdev, 4); - res_len = pci_resource_len(pdev, 4); - if (!res_start || !res_len) - goto err_out; - - res_flag = pci_resource_flags(pdev, 4); - if (res_flag & IORESOURCE_CACHEABLE) - mvi->regs = ioremap(res_start, res_len); - else - mvi->regs = ioremap_nocache(res_start, res_len); - - if (!mvi->regs) - goto err_out; - - /* - * alloc and init our DMA areas - */ - - mvi->tx = dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, - &mvi->tx_dma, GFP_KERNEL); - if (!mvi->tx) - goto err_out; - memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); - - mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, - &mvi->rx_fis_dma, GFP_KERNEL); - if (!mvi->rx_fis) - goto err_out; - memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); - - mvi->rx 
= dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), - &mvi->rx_dma, GFP_KERNEL); - if (!mvi->rx) - goto err_out; - memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); - - mvi->rx[0] = cpu_to_le32(0xfff); - mvi->rx_cons = 0xfff; - - mvi->slot = dma_alloc_coherent(&pdev->dev, - sizeof(*mvi->slot) * MVS_SLOTS, - &mvi->slot_dma, GFP_KERNEL); - if (!mvi->slot) - goto err_out; - memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); - - for (i = 0; i < MVS_SLOTS; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; - - slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, - &slot->buf_dma, GFP_KERNEL); - if (!slot->buf) - goto err_out; - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); - } - - /* finally, read NVRAM to get our SAS address */ - if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) - goto err_out; - return mvi; - -err_out: - mvs_free(mvi); - return NULL; -} - -static u32 mvs_cr32(void __iomem *regs, u32 addr) -{ - mw32(CMD_ADDR, addr); - return mr32(CMD_DATA); -} - -static void mvs_cw32(void __iomem *regs, u32 addr, u32 val) -{ - mw32(CMD_ADDR, addr); - mw32(CMD_DATA, val); -} - -static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) -{ - void __iomem *regs = mvi->regs; - return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): - mr32(P4_SER_CTLSTAT + (port - 4) * 4); -} - -static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) -{ - void __iomem *regs = mvi->regs; - if (port < 4) - mw32(P0_SER_CTLSTAT + port * 4, val); - else - mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); -} - -static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) -{ - void __iomem *regs = mvi->regs + off; - void __iomem *regs2 = mvi->regs + off2; - return (port < 4)?readl(regs + port * 8): - readl(regs2 + (port - 4) * 8); -} - -static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, - u32 port, u32 val) -{ - void __iomem *regs = mvi->regs + off; - void __iomem *regs2 = mvi->regs + off2; - if (port < 4) - writel(val, regs + port * 8); - else - writel(val, regs2 + (port - 4) * 8); -} - -static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port); -} - -static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); -} - -static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) -{ - mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); -} - -static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port); -} - -static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); -} - -static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) -{ - mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); -} - -static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port); -} - -static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); -} - -static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); -} - -static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_INT_MASK, 
MVS_P4_INT_MASK, port, val); -} - -static void __devinit mvs_phy_hacks(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - /* workaround for SATA R-ERR, to ignore phy glitch */ - tmp = mvs_cr32(regs, CMD_PHY_TIMER); - tmp &= ~(1 << 9); - tmp |= (1 << 10); - mvs_cw32(regs, CMD_PHY_TIMER, tmp); - - /* enable retry 127 times */ - mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); - - /* extend open frame timeout to max */ - tmp = mvs_cr32(regs, CMD_SAS_CTL0); - tmp &= ~0xffff; - tmp |= 0x3fff; - mvs_cw32(regs, CMD_SAS_CTL0, tmp); - - /* workaround for WDTIMEOUT , set to 550 ms */ - mvs_cw32(regs, CMD_WD_TIMER, 0x86470); - - /* not to halt for different port op during wideport link change */ - mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); - - /* workaround for Seagate disk not-found OOB sequence, recv - * COMINIT before sending out COMWAKE */ - tmp = mvs_cr32(regs, CMD_PHY_MODE_21); - tmp &= 0x0000ffff; - tmp |= 0x00fa0000; - mvs_cw32(regs, CMD_PHY_MODE_21, tmp); - - tmp = mvs_cr32(regs, CMD_PHY_TIMER); - tmp &= 0x1fffffff; - tmp |= (2U << 29); /* 8 ms retry */ - mvs_cw32(regs, CMD_PHY_TIMER, tmp); - - /* TEST - for phy decoding error, adjust voltage levels */ - mw32(P0_VSR_ADDR + 0, 0x8); - mw32(P0_VSR_DATA + 0, 0x2F0); - - mw32(P0_VSR_ADDR + 8, 0x8); - mw32(P0_VSR_DATA + 8, 0x2F0); - - mw32(P0_VSR_ADDR + 16, 0x8); - mw32(P0_VSR_DATA + 16, 0x2F0); - - mw32(P0_VSR_ADDR + 24, 0x8); - mw32(P0_VSR_DATA + 24, 0x2F0); - -} - -static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(PCS); - if (mvi->chip->n_phy <= 4) - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); - else - tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); - mw32(PCS, tmp); -} - -static void mvs_detect_porttype(struct mvs_info *mvi, int i) -{ - void __iomem *regs = mvi->regs; - u32 reg; - struct mvs_phy *phy = &mvi->phy[i]; - - /* TODO check & save device type */ - reg = mr32(GBL_PORT_TYPE); - - if (reg & MODE_SAS_SATA & (1 << i)) - phy->phy_type |= PORT_TYPE_SAS; - else - phy->phy_type |= PORT_TYPE_SATA; -} - -static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) -{ - u32 *s = (u32 *) buf; - - if (!s) - return NULL; - - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); - s[3] = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); - s[2] = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); - s[1] = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); - s[0] = mvs_read_port_cfg_data(mvi, i); - - return (void *)s; -} - -static u32 mvs_is_sig_fis_received(u32 irq_status) -{ - return irq_status & PHYEV_SIG_FIS; -} - -static void mvs_update_wideport(struct mvs_info *mvi, int i) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port; - int j, no; - - for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) - if (no & 1) { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, - port->wide_port_phymap); - } else { - mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); - mvs_write_port_cfg_data(mvi, no, 0); - } -} - -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) -{ - u32 tmp; - struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port;; - - tmp = mvs_read_phy_ctl(mvi, i); - - if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { - if (!port) - phy->phy_attached = 1; - return tmp; - } - - if (port) { - if (phy->phy_type & PORT_TYPE_SAS) { - port->wide_port_phymap &= ~(1U 
<< i); - if (!port->wide_port_phymap) - port->port_attached = 0; - mvs_update_wideport(mvi, i); - } else if (phy->phy_type & PORT_TYPE_SATA) - port->port_attached = 0; - mvs_free_reg_set(mvi, phy->port); - phy->port = NULL; - phy->phy_attached = 0; - phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); - } - return 0; -} - -static void mvs_update_phyinfo(struct mvs_info *mvi, int i, - int get_st) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct pci_dev *pdev = mvi->pdev; - u32 tmp; - u64 tmp64; - - mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); - phy->dev_info = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; - - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); - - if (get_st) { - phy->irq_status = mvs_read_port_irq_stat(mvi, i); - phy->phy_status = mvs_is_phy_ready(mvi, i); - } - - if (phy->phy_status) { - u32 phy_st; - struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; - - mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); - phy_st = mvs_read_port_cfg_data(mvi, i); - - sas_phy->linkrate = - (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; - phy->minimum_linkrate = - (phy->phy_status & - PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; - phy->maximum_linkrate = - (phy->phy_status & - PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; - - if (phy->phy_type & PORT_TYPE_SAS) { - /* Updated attached_sas_addr */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); - phy->att_dev_sas_addr = - (u64) mvs_read_port_cfg_data(mvi, i) << 32; - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); - phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); - phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); - phy->identify.device_type = - phy->att_dev_info & PORT_DEV_TYPE_MASK; - - if (phy->identify.device_type == SAS_END_DEV) - phy->identify.target_port_protocols = - SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) - phy->identify.target_port_protocols = - SAS_PROTOCOL_SMP; - if (phy_st & PHY_OOB_DTCTD) - sas_phy->oob_mode = SAS_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct sas_identify_frame); - } else if (phy->phy_type & PORT_TYPE_SATA) { - phy->identify.target_port_protocols = SAS_PROTOCOL_STP; - if (mvs_is_sig_fis_received(phy->irq_status)) { - phy->att_dev_sas_addr = i; /* temp */ - if (phy_st & PHY_OOB_DTCTD) - sas_phy->oob_mode = SATA_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct dev_to_host_fis); - mvs_get_d2h_reg(mvi, i, - (void *)sas_phy->frame_rcvd); - } else { - dev_printk(KERN_DEBUG, &pdev->dev, - "No sig fis\n"); - phy->phy_type &= ~(PORT_TYPE_SATA); - goto out_done; - } - } - tmp64 = cpu_to_be64(phy->att_dev_sas_addr); - memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); - - dev_printk(KERN_DEBUG, &pdev->dev, - "phy[%d] Get Attached Address 0x%llX ," - " SAS Address 0x%llX\n", - i, - (unsigned long long)phy->att_dev_sas_addr, - (unsigned long long)phy->dev_sas_addr); - dev_printk(KERN_DEBUG, &pdev->dev, - "Rate = %x , type = %d\n", - sas_phy->linkrate, phy->phy_type); - - /* workaround for HW phy decoding error on 1.5g disk drive */ - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); - tmp = mvs_read_port_vsr_data(mvi, i); - if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == - SAS_LINK_RATE_1_5_GBPS) - tmp &= ~PHY_MODE6_LATECLK; - else - tmp |= PHY_MODE6_LATECLK; - 
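The phy-status decode above packs several link rates into one 32-bit word: the negotiated rate plus the supported minimum and maximum, extracted with nibble masks at shifts 8 and 12. A user-space sketch of that unpacking; the mask names and sample value are illustrative, only the bit positions mirror the PHY_*_SPP_PHYS_LINK_RATE_MASK usage above:

#include <stdint.h>
#include <stdio.h>

#define MIN_RATE_MASK	0x00000f00u	/* bits 11:8 */
#define MAX_RATE_MASK	0x0000f000u	/* bits 15:12 */

int main(void)
{
	uint32_t phy_status = 0x00009800u;	/* hypothetical sample */
	unsigned int min_rate = (phy_status & MIN_RATE_MASK) >> 8;
	unsigned int max_rate = (phy_status & MAX_RATE_MASK) >> 12;

	/* in libsas numbering, 8 means 1.5 Gbps and 9 means 3.0 Gbps */
	printf("min %u, max %u\n", min_rate, max_rate);
	return 0;
}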
mvs_write_port_vsr_data(mvi, i, tmp); - - } -out_done: - if (get_st) - mvs_write_port_irq_stat(mvi, i, phy->irq_status); -} - -static void mvs_port_formed(struct asd_sas_phy *sas_phy) -{ - struct sas_ha_struct *sas_ha = sas_phy->ha; - struct mvs_info *mvi = sas_ha->lldd_ha; - struct asd_sas_port *sas_port = sas_phy->port; - struct mvs_phy *phy = sas_phy->lldd_phy; - struct mvs_port *port = &mvi->port[sas_port->id]; - unsigned long flags; - - spin_lock_irqsave(&mvi->lock, flags); - port->port_attached = 1; - phy->port = port; - port->taskfileset = MVS_ID_NOT_MAPPED; - if (phy->phy_type & PORT_TYPE_SAS) { - port->wide_port_phymap = sas_port->phy_mask; - mvs_update_wideport(mvi, sas_phy->id); - } - spin_unlock_irqrestore(&mvi->lock, flags); -} - -static int mvs_I_T_nexus_reset(struct domain_device *dev) -{ - return TMF_RESP_FUNC_FAILED; -} - -static int __devinit mvs_hw_init(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - int i; - u32 tmp, cctl; - - /* make sure interrupts are masked immediately (paranoia) */ - mw32(GBL_CTL, 0); - tmp = mr32(GBL_CTL); - - /* Reset Controller */ - if (!(tmp & HBA_RST)) { - if (mvi->flags & MVF_PHY_PWR_FIX) { - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp &= ~PCTL_PWR_ON; - tmp |= PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp &= ~PCTL_PWR_ON; - tmp |= PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - } - - /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ - mw32_f(GBL_CTL, HBA_RST); - } - - /* wait for reset to finish; timeout is just a guess */ - i = 1000; - while (i-- > 0) { - msleep(10); - - if (!(mr32(GBL_CTL) & HBA_RST)) - break; - } - if (mr32(GBL_CTL) & HBA_RST) { - dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); - return -EBUSY; - } - - /* Init Chip */ - /* make sure RST is set; HBA_RST /should/ have done that for us */ - cctl = mr32(CTL); - if (cctl & CCTL_RST) - cctl &= ~CCTL_RST; - else - mw32_f(CTL, cctl | CCTL_RST); - - /* write to device control _AND_ device status register? - A.C. 
*/ - pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); - tmp &= ~PRD_REQ_MASK; - tmp |= PRD_REQ_SIZE; - pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp |= PCTL_PWR_ON; - tmp &= ~PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp |= PCTL_PWR_ON; - tmp &= ~PCTL_OFF; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - - mw32_f(CTL, cctl); - - /* reset control */ - mw32(PCS, 0); /*MVS_PCS */ - - mvs_phy_hacks(mvi); - - mw32(CMD_LIST_LO, mvi->slot_dma); - mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); - - mw32(RX_FIS_LO, mvi->rx_fis_dma); - mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); - - mw32(TX_CFG, MVS_CHIP_SLOT_SZ); - mw32(TX_LO, mvi->tx_dma); - mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); - - mw32(RX_CFG, MVS_RX_RING_SZ); - mw32(RX_LO, mvi->rx_dma); - mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); - - /* enable auto port detection */ - mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); - msleep(1100); - /* init and reset phys */ - for (i = 0; i < mvi->chip->n_phy; i++) { - u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); - u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); - - mvs_detect_porttype(mvi, i); - - /* set phy local SAS address */ - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); - mvs_write_port_cfg_data(mvi, i, lo); - mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); - mvs_write_port_cfg_data(mvi, i, hi); - - /* reset phy */ - tmp = mvs_read_phy_ctl(mvi, i); - tmp |= PHY_RST; - mvs_write_phy_ctl(mvi, i, tmp); - } - - msleep(100); - - for (i = 0; i < mvi->chip->n_phy; i++) { - /* clear phy int status */ - tmp = mvs_read_port_irq_stat(mvi, i); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_stat(mvi, i, tmp); - - /* set phy int mask */ - tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | - PHYEV_ID_DONE | PHYEV_DEC_ERR; - mvs_write_port_irq_mask(mvi, i, tmp); - - msleep(100); - mvs_update_phyinfo(mvi, i, 1); - mvs_enable_xmt(mvi, i); - } - - /* FIXME: update wide port bitmaps */ - - /* little endian for open address and command table, etc. */ - /* A.C. - * it seems that ( from the spec ) turning on big-endian won't - * do us any good on big-endian machines, need further confirmation - */ - cctl = mr32(CTL); - cctl |= CCTL_ENDIAN_CMD; - cctl |= CCTL_ENDIAN_DATA; - cctl &= ~CCTL_ENDIAN_OPEN; - cctl |= CCTL_ENDIAN_RSP; - mw32_f(CTL, cctl); - - /* reset CMD queue */ - tmp = mr32(PCS); - tmp |= PCS_CMD_RST; - mw32(PCS, tmp); - /* interrupt coalescing may cause missing HW interrput in some case, - * and the max count is 0x1ff, while our max slot is 0x200, - * it will make count 0. 
- */ - tmp = 0; - mw32(INT_COAL, tmp); - - tmp = 0x100; - mw32(INT_COAL_TMOUT, tmp); - - /* ladies and gentlemen, start your engines */ - mw32(TX_CFG, 0); - mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); - mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); - /* enable CMD/CMPL_Q/RESP mode */ - mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN); - - /* enable completion queue interrupt */ - tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS); - mw32(INT_MASK, tmp); - - /* Enable SRS interrupt */ - mw32(INT_MASK_SRS, 0xFF); - return 0; -} - -static void __devinit mvs_print_info(struct mvs_info *mvi) -{ - struct pci_dev *pdev = mvi->pdev; - static int printed_version; - - if (!printed_version++) - dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); - - dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n", - mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); -} - -static int __devinit mvs_pci_init(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - int rc; - struct mvs_info *mvi; - irq_handler_t irq_handler = mvs_interrupt; - - rc = pci_enable_device(pdev); - if (rc) - return rc; - - pci_set_master(pdev); - - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) - goto err_out_disable; - - rc = pci_go_64(pdev); - if (rc) - goto err_out_regions; - - mvi = mvs_alloc(pdev, ent); - if (!mvi) { - rc = -ENOMEM; - goto err_out_regions; - } - - rc = mvs_hw_init(mvi); - if (rc) - goto err_out_mvi; - -#ifndef MVS_DISABLE_MSI - if (!pci_enable_msi(pdev)) { - u32 tmp; - void __iomem *regs = mvi->regs; - mvi->flags |= MVF_MSI; - irq_handler = mvs_msi_interrupt; - tmp = mr32(PCS); - mw32(PCS, tmp | PCS_SELF_CLEAR); - } -#endif - - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); - if (rc) - goto err_out_msi; - - rc = scsi_add_host(mvi->shost, &pdev->dev); - if (rc) - goto err_out_irq; - - rc = sas_register_ha(&mvi->sas); - if (rc) - goto err_out_shost; - - pci_set_drvdata(pdev, mvi); - - mvs_print_info(mvi); - - mvs_hba_interrupt_enable(mvi); - - scsi_scan_host(mvi->shost); - - return 0; - -err_out_shost: - scsi_remove_host(mvi->shost); -err_out_irq: - free_irq(pdev->irq, mvi); -err_out_msi: - if (mvi->flags |= MVF_MSI) - pci_disable_msi(pdev); -err_out_mvi: - mvs_free(mvi); -err_out_regions: - pci_release_regions(pdev); -err_out_disable: - pci_disable_device(pdev); - return rc; -} - -static void __devexit mvs_pci_remove(struct pci_dev *pdev) -{ - struct mvs_info *mvi = pci_get_drvdata(pdev); - - pci_set_drvdata(pdev, NULL); - - if (mvi) { - sas_unregister_ha(&mvi->sas); - mvs_hba_interrupt_disable(mvi); - sas_remove_host(mvi->shost); - scsi_remove_host(mvi->shost); - - free_irq(pdev->irq, mvi); - if (mvi->flags & MVF_MSI) - pci_disable_msi(pdev); - mvs_free(mvi); - pci_release_regions(pdev); - } - pci_disable_device(pdev); -} - -static struct sas_domain_function_template mvs_transport_ops = { - .lldd_execute_task = mvs_task_exec, - .lldd_control_phy = mvs_phy_control, - .lldd_abort_task = mvs_task_abort, - .lldd_port_formed = mvs_port_formed, - .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, -}; - -static struct pci_device_id __devinitdata mvs_pci_table[] = { - { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, - { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, - { - .vendor = PCI_VENDOR_ID_MARVELL, - .device = 0x6440, - .subvendor = PCI_ANY_ID, - .subdevice = 0x6480, - .class = 0, - .class_mask = 0, - .driver_data = chip_6480, - }, - { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, - { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, - - { } /* terminate list */ -}; - -static struct pci_driver 
mvs_pci_driver = { - .name = DRV_NAME, - .id_table = mvs_pci_table, - .probe = mvs_pci_init, - .remove = __devexit_p(mvs_pci_remove), -}; - -static int __init mvs_init(void) -{ - int rc; - - mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); - if (!mvs_stt) - return -ENOMEM; - - rc = pci_register_driver(&mvs_pci_driver); - if (rc) - goto err_out; - - return 0; - -err_out: - sas_release_transport(mvs_stt); - return rc; -} - -static void __exit mvs_exit(void) -{ - pci_unregister_driver(&mvs_pci_driver); - sas_release_transport(mvs_stt); -} - -module_init(mvs_init); -module_exit(mvs_exit); - -MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); -MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); -MODULE_VERSION(DRV_VERSION); -MODULE_LICENSE("GPL"); -MODULE_DEVICE_TABLE(pci, mvs_pci_table); diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig new file mode 100644 index 000000000000..6de7af27e507 --- /dev/null +++ b/drivers/scsi/mvsas/Kconfig @@ -0,0 +1,42 @@ +# +# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver. +# +# Copyright 2007 Red Hat, Inc. +# Copyright 2008 Marvell. <kewei@marvell.com> +# +# This file is licensed under GPLv2. +# +# This file is part of the 88SE64XX/88SE94XX driver. +# +# The 88SE64XX/88SE94XX driver is free software; you can redistribute +# it and/or modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2 of the +# License. +# +# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be +# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA +# +# + +config SCSI_MVSAS + tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support" + depends on PCI + select SCSI_SAS_LIBSAS + select FW_LOADER + help + This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s + PCI-E 88SE94XX chip-based host adapters. + +config SCSI_MVSAS_DEBUG + bool "Compile in debug mode" + default y + depends on SCSI_MVSAS + help + Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, + the driver prints some messages to the console. diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile new file mode 100644 index 000000000000..52ac4264677d --- /dev/null +++ b/drivers/scsi/mvsas/Makefile @@ -0,0 +1,32 @@ +# +# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver. +# +# Copyright 2007 Red Hat, Inc. +# Copyright 2008 Marvell. <kewei@marvell.com> +# +# This file is licensed under GPLv2. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; version 2 of the +# License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +# USA + +ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y) + EXTRA_CFLAGS += -DMV_DEBUG +endif + +obj-$(CONFIG_SCSI_MVSAS) += mvsas.o +mvsas-y += mv_init.o \ + mv_sas.o \ + mv_64xx.o \ + mv_94xx.o diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c new file mode 100644 index 000000000000..10a5077b6aed --- /dev/null +++ b/drivers/scsi/mvsas/mv_64xx.c @@ -0,0 +1,793 @@ +/* + * Marvell 88SE64xx hardware specific + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. <kewei@marvell.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + +#include "mv_sas.h" +#include "mv_64xx.h" +#include "mv_chips.h" + +static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i) +{ + void __iomem *regs = mvi->regs; + u32 reg; + struct mvs_phy *phy = &mvi->phy[i]; + + /* TODO check & save device type */ + reg = mr32(MVS_GBL_PORT_TYPE); + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + if (reg & MODE_SAS_SATA & (1 << i)) + phy->phy_type |= PORT_TYPE_SAS; + else + phy->phy_type |= PORT_TYPE_SATA; +} + +static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_PCS); + if (mvi->chip->n_phy <= 4) + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); + else + tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); + mw32(MVS_PCS, tmp); +} + +static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + + mvs_phy_hacks(mvi); + + if (!(mvi->flags & MVF_FLAG_SOC)) { + /* TEST - for phy decoding error, adjust voltage levels */ + mw32(MVS_P0_VSR_ADDR + 0, 0x8); + mw32(MVS_P0_VSR_DATA + 0, 0x2F0); + + mw32(MVS_P0_VSR_ADDR + 8, 0x8); + mw32(MVS_P0_VSR_DATA + 8, 0x2F0); + + mw32(MVS_P0_VSR_ADDR + 16, 0x8); + mw32(MVS_P0_VSR_DATA + 16, 0x2F0); + + mw32(MVS_P0_VSR_ADDR + 24, 0x8); + mw32(MVS_P0_VSR_DATA + 24, 0x2F0); + } else { + int i; + /* disable auto port detection */ + mw32(MVS_GBL_PORT_TYPE, 0); + for (i = 0; i < mvi->chip->n_phy; i++) { + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7); + mvs_write_port_vsr_data(mvi, i, 0x90000000); + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9); + mvs_write_port_vsr_data(mvi, i, 0x50f2); + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11); + mvs_write_port_vsr_data(mvi, i, 0x0e); + } + } +} + +static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id) +{ + void __iomem *regs = mvi->regs; + u32 reg, tmp; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + if (phy_id < 4) + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, ®); + else + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, ®); + + } else + reg = mr32(MVS_PHY_CTL); + + tmp = reg; + if (phy_id < 4) + tmp |= (1U << phy_id) << 
PCTL_LINK_OFFS; + else + tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + if (phy_id < 4) { + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + mdelay(10); + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); + } else { + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + mdelay(10); + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg); + } + } else { + mw32(MVS_PHY_CTL, tmp); + mdelay(10); + mw32(MVS_PHY_CTL, reg); + } +} + +static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) +{ + u32 tmp; + tmp = mvs_read_port_irq_stat(mvi, phy_id); + tmp &= ~PHYEV_RDY_CH; + mvs_write_port_irq_stat(mvi, phy_id, tmp); + tmp = mvs_read_phy_ctl(mvi, phy_id); + if (hard) + tmp |= PHY_RST_HARD; + else + tmp |= PHY_RST; + mvs_write_phy_ctl(mvi, phy_id, tmp); + if (hard) { + do { + tmp = mvs_read_phy_ctl(mvi, phy_id); + } while (tmp & PHY_RST_HARD); + } +} + +static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + int i; + + /* make sure interrupts are masked immediately (paranoia) */ + mw32(MVS_GBL_CTL, 0); + tmp = mr32(MVS_GBL_CTL); + + /* Reset Controller */ + if (!(tmp & HBA_RST)) { + if (mvi->flags & MVF_PHY_PWR_FIX) { + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + } + } + + /* make sure interrupts are masked immediately (paranoia) */ + mw32(MVS_GBL_CTL, 0); + tmp = mr32(MVS_GBL_CTL); + + /* Reset Controller */ + if (!(tmp & HBA_RST)) { + /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ + mw32_f(MVS_GBL_CTL, HBA_RST); + } + + /* wait for reset to finish; timeout is just a guess */ + i = 1000; + while (i-- > 0) { + msleep(10); + + if (!(mr32(MVS_GBL_CTL) & HBA_RST)) + break; + } + if (mr32(MVS_GBL_CTL) & HBA_RST) { + dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n"); + return -EBUSY; + } + return 0; +} + +static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + if (!(mvi->flags & MVF_FLAG_SOC)) { + u32 offs; + if (phy_id < 4) + offs = PCR_PHY_CTL; + else { + offs = PCR_PHY_CTL2; + phy_id -= 4; + } + pci_read_config_dword(mvi->pdev, offs, &tmp); + tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); + pci_write_config_dword(mvi->pdev, offs, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); + mw32(MVS_PHY_CTL, tmp); + } +} + +static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + if (!(mvi->flags & MVF_FLAG_SOC)) { + u32 offs; + if (phy_id < 4) + offs = PCR_PHY_CTL; + else { + offs = PCR_PHY_CTL2; + phy_id -= 4; + } + pci_read_config_dword(mvi->pdev, offs, &tmp); + tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); + pci_write_config_dword(mvi->pdev, offs, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); + mw32(MVS_PHY_CTL, tmp); + } +} + +static int __devinit mvs_64xx_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + u32 tmp, cctl; + + if (mvi->pdev && mvi->pdev->revision == 0) + mvi->flags |= MVF_PHY_PWR_FIX; + if (!(mvi->flags & MVF_FLAG_SOC)) { + mvs_show_pcie_usage(mvi); + tmp = mvs_64xx_chip_reset(mvi); + if (tmp) + return tmp; + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= 
~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + mw32(MVS_PHY_CTL, tmp); + } + + /* Init Chip */ + /* make sure RST is set; HBA_RST /should/ have done that for us */ + cctl = mr32(MVS_CTL) & 0xFFFF; + if (cctl & CCTL_RST) + cctl &= ~CCTL_RST; + else + mw32_f(MVS_CTL, cctl | CCTL_RST); + + if (!(mvi->flags & MVF_FLAG_SOC)) { + /* write to device control _AND_ device status register */ + pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); + tmp &= ~PRD_REQ_MASK; + tmp |= PRD_REQ_SIZE; + pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp &= ~PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp &= ~PCTL_PWR_OFF; + tmp &= ~PCTL_PHY_DSBL; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + } else { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_COM_ON; + tmp &= ~PCTL_PHY_DSBL; + tmp |= PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + tmp &= ~PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + } + + /* reset control */ + mw32(MVS_PCS, 0); /* MVS_PCS */ + /* init phys */ + mvs_64xx_phy_hacks(mvi); + + /* enable auto port detection */ + mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); + + mw32(MVS_CMD_LIST_LO, mvi->slot_dma); + mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); + + mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); + mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); + + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); + mw32(MVS_TX_LO, mvi->tx_dma); + mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); + + mw32(MVS_RX_CFG, MVS_RX_RING_SZ); + mw32(MVS_RX_LO, mvi->rx_dma); + mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); + + for (i = 0; i < mvi->chip->n_phy; i++) { + /* set phy local SAS address */ + /* should set little endian SAS address to 64xx chip */ + mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI, + cpu_to_be64(mvi->phy[i].dev_sas_addr)); + + mvs_64xx_enable_xmt(mvi, i); + + mvs_64xx_phy_reset(mvi, i, 1); + msleep(500); + mvs_64xx_detect_porttype(mvi, i); + } + if (mvi->flags & MVF_FLAG_SOC) { + /* set select registers */ + writel(0x0E008000, regs + 0x000); + writel(0x59000008, regs + 0x004); + writel(0x20, regs + 0x008); + writel(0x20, regs + 0x00c); + writel(0x20, regs + 0x010); + writel(0x20, regs + 0x014); + writel(0x20, regs + 0x018); + writel(0x20, regs + 0x01c); + } + for (i = 0; i < mvi->chip->n_phy; i++) { + /* clear phy int status */ + tmp = mvs_read_port_irq_stat(mvi, i); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_stat(mvi, i, tmp); + + /* set phy int mask */ + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | + PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR | + PHYEV_DEC_ERR; + mvs_write_port_irq_mask(mvi, i, tmp); + + msleep(100); + mvs_update_phyinfo(mvi, i, 1); + } + + /* FIXME: update wide port bitmaps */ + + /* little endian for open address and command table, etc. */ + /* + * it seems that ( from the spec ) turning on big-endian won't + * do us any good on big-endian machines, need further confirmation + */ + cctl = mr32(MVS_CTL); + cctl |= CCTL_ENDIAN_CMD; + cctl |= CCTL_ENDIAN_DATA; + cctl &= ~CCTL_ENDIAN_OPEN; + cctl |= CCTL_ENDIAN_RSP; + mw32_f(MVS_CTL, cctl); + + /* reset CMD queue */ + tmp = mr32(MVS_PCS); + tmp |= PCS_CMD_RST; + mw32(MVS_PCS, tmp); + /* interrupt coalescing may cause a missed HW interrupt in some cases, + * and the max count is 0x1ff, while our max slot is 0x200, + * it will make the count 0. 
+ */ + tmp = 0; + mw32(MVS_INT_COAL, tmp); + + tmp = 0x100; + mw32(MVS_INT_COAL_TMOUT, tmp); + + /* ladies and gentlemen, start your engines */ + mw32(MVS_TX_CFG, 0); + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); + mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); + /* enable CMD/CMPL_Q/RESP mode */ + mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | + PCS_CMD_EN | PCS_CMD_STOP_ERR); + + /* enable completion queue interrupt */ + tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | + CINT_DMA_PCIE); + + mw32(MVS_INT_MASK, tmp); + + /* Enable SRS interrupt */ + mw32(MVS_INT_MASK_SRS_0, 0xFFFF); + + return 0; +} + +static int mvs_64xx_ioremap(struct mvs_info *mvi) +{ + if (!mvs_ioremap(mvi, 4, 2)) + return 0; + return -1; +} + +static void mvs_64xx_iounmap(struct mvs_info *mvi) +{ + mvs_iounmap(mvi->regs); + mvs_iounmap(mvi->regs_ex); +} + +static void mvs_64xx_interrupt_enable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + mw32(MVS_GBL_CTL, tmp | INT_EN); +} + +static void mvs_64xx_interrupt_disable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(MVS_GBL_CTL); + mw32(MVS_GBL_CTL, tmp & ~INT_EN); +} + +static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq) +{ + void __iomem *regs = mvi->regs; + u32 stat; + + if (!(mvi->flags & MVF_FLAG_SOC)) { + stat = mr32(MVS_GBL_INT_STAT); + + if (stat == 0 || stat == 0xffffffff) + return 0; + } else + stat = 1; + return stat; +} + +static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat) +{ + void __iomem *regs = mvi->regs; + + /* clear CMD_CMPLT ASAP */ + mw32_f(MVS_INT_STAT, CINT_DONE); +#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); +#endif + mvs_int_full(mvi); +#ifndef MVS_USE_TASKLET + spin_unlock(&mvi->lock); +#endif + return IRQ_HANDLED; +} + +static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx) +{ + u32 tmp; + mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32)); + mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); + do { + tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3)); + } while (tmp & 1 << (slot_idx % 32)); +} + +static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, + u32 tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + if (type == PORT_TYPE_SATA) { + tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); + mw32(MVS_INT_STAT_SRS_0, tmp); + } + mw32(MVS_INT_STAT, CINT_CI_STOP); + tmp = mr32(MVS_PCS) | 0xFF00; + mw32(MVS_PCS, tmp); +} + +static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp, offs; + + if (*tfs == MVS_ID_NOT_MAPPED) + return; + + offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (*tfs < 16) { + tmp = mr32(MVS_PCS); + mw32(MVS_PCS, tmp & ~offs); + } else { + tmp = mr32(MVS_CTL); + mw32(MVS_CTL, tmp & ~offs); + } + + tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs); + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + + *tfs = MVS_ID_NOT_MAPPED; + return; +} + +static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + int i; + u32 tmp, offs; + void __iomem *regs = mvi->regs; + + if (*tfs != MVS_ID_NOT_MAPPED) + return 0; + + tmp = mr32(MVS_PCS); + + for (i = 0; i < mvi->chip->srs_sz; i++) { + if (i == 16) + tmp = mr32(MVS_CTL); + offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (!(tmp & offs)) { + *tfs = i; + + if (i < 16) + mw32(MVS_PCS, tmp | offs); + else + mw32(MVS_CTL, tmp | offs); 
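mvs_64xx_assign_reg_set() and mvs_64xx_free_reg_set() above are a bitmap allocator for SATA register sets, with the in-use bits split across two register windows (MVS_PCS for sets 0 through 15, MVS_CTL above that). A user-space model that collapses the two windows into one word to show the find-first-zero-bit pattern; SRS_SZ and ID_NOT_MAPPED are stand-ins for chip->srs_sz and MVS_ID_NOT_MAPPED, and the stale-interrupt clearing is not modelled:

#include <stdint.h>
#include <stdio.h>

#define SRS_SZ		32
#define ID_NOT_MAPPED	0xff

static uint32_t srs_busy;	/* one bit per SATA register set */

static uint8_t srs_assign(void)
{
	uint8_t i;

	for (i = 0; i < SRS_SZ; i++) {
		if (!(srs_busy & (1u << i))) {
			srs_busy |= 1u << i;	/* claim the free set */
			return i;
		}
	}
	return ID_NOT_MAPPED;	/* every register set is taken */
}

static void srs_free(uint8_t *tfs)
{
	if (*tfs == ID_NOT_MAPPED)
		return;
	srs_busy &= ~(1u << *tfs);
	*tfs = ID_NOT_MAPPED;
}

int main(void)
{
	uint8_t a = srs_assign(), b = srs_assign();

	printf("%u %u\n", a, b);	/* 0 1 */
	srs_free(&a);
	printf("%u\n", srs_assign());	/* 0 again */
	return 0;
}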
+ tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i); + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + return 0; + } + } + return MVS_ID_NOT_MAPPED; +} + +void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct mvs_prd *buf_prd = prd; + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } +} + +static int mvs_64xx_oob_done(struct mvs_info *mvi, int i) +{ + u32 phy_st; + mvs_write_port_cfg_addr(mvi, i, + PHYR_PHY_STAT); + phy_st = mvs_read_port_cfg_data(mvi, i); + if (phy_st & PHY_OOB_DTCTD) + return 1; + return 0; +} + +static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i, + struct sas_identify_frame *id) + +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + + phy->minimum_linkrate = + (phy->phy_status & + PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; + phy->maximum_linkrate = + (phy->phy_status & + PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; + + mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); + phy->dev_info = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); + phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); + phy->att_dev_sas_addr = + (u64) mvs_read_port_cfg_data(mvi, i) << 32; + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); + phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr); +} + +static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i) +{ + u32 tmp; + struct mvs_phy *phy = &mvi->phy[i]; + /* workaround for HW phy decoding error on 1.5g disk drive */ + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); + tmp = mvs_read_port_vsr_data(mvi, i); + if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == + SAS_LINK_RATE_1_5_GBPS) + tmp &= ~PHY_MODE6_LATECLK; + else + tmp |= PHY_MODE6_LATECLK; + mvs_write_port_vsr_data(mvi, i, tmp); +} + +void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates) +{ + u32 lrmin = 0, lrmax = 0; + u32 tmp; + + tmp = mvs_read_phy_ctl(mvi, phy_id); + lrmin = (rates->minimum_linkrate << 8); + lrmax = (rates->maximum_linkrate << 12); + + if (lrmin) { + tmp &= ~(0xf << 8); + tmp |= lrmin; + } + if (lrmax) { + tmp &= ~(0xf << 12); + tmp |= lrmax; + } + mvs_write_phy_ctl(mvi, phy_id, tmp); + mvs_64xx_phy_reset(mvi, phy_id, 1); +} + +static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_PCS); + mw32(MVS_PCS, tmp & 0xFFFF); + mw32(MVS_PCS, tmp); + tmp = mr32(MVS_CTL); + mw32(MVS_CTL, tmp & 0xFFFF); + mw32(MVS_CTL, tmp); +} + + +u32 mvs_64xx_spi_read_data(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex; + return ior32(SPI_DATA_REG_64XX); +} + +void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data) +{ + void __iomem *regs = mvi->regs_ex; + iow32(SPI_DATA_REG_64XX, data); +} + + +int mvs_64xx_spi_buildcmd(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ) +{ + u32 dwTmp; + + dwTmp = ((u32)cmd << 24) | ((u32)length << 19); + if (read) + dwTmp |= 1U<<23; + + if (addr != MV_MAX_U32) { + dwTmp |= 1U<<22; + dwTmp |= (addr & 0x0003FFFF); + } + + *dwCmd = dwTmp; + return 0; +} + + +int 
mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+	void __iomem *regs = mvi->regs_ex;
+	int retry;
+
+	/* a single pass today; kept as a loop, apparently so the retry
+	 * count can be tuned later
+	 */
+	for (retry = 0; retry < 1; retry++) {
+		iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
+		iow32(SPI_CMD_REG_64XX, cmd);
+		iow32(SPI_CTRL_REG_64XX,
+			SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
+	}
+
+	return 0;
+}
+
+int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 i, dwTmp;
+
+	for (i = 0; i < timeout; i++) {
+		dwTmp = ior32(SPI_CTRL_REG_64XX);
+		if (!(dwTmp & SPI_CTRL_SPISTART))
+			return 0;
+		msleep(10);
+	}
+
+	return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
+	int i;
+	struct mvs_prd *buf_prd = prd;
+	buf_prd += from;
+	for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+		buf_prd->addr = cpu_to_le64(buf_dma);
+		buf_prd->len = cpu_to_le32(buf_len);
+		++buf_prd;
+	}
+}
+#endif
+
+const struct mvs_dispatch mvs_64xx_dispatch = {
+	"mv64xx",
+	mvs_64xx_init,
+	NULL,
+	mvs_64xx_ioremap,
+	mvs_64xx_iounmap,
+	mvs_64xx_isr,
+	mvs_64xx_isr_status,
+	mvs_64xx_interrupt_enable,
+	mvs_64xx_interrupt_disable,
+	mvs_read_phy_ctl,
+	mvs_write_phy_ctl,
+	mvs_read_port_cfg_data,
+	mvs_write_port_cfg_data,
+	mvs_write_port_cfg_addr,
+	mvs_read_port_vsr_data,
+	mvs_write_port_vsr_data,
+	mvs_write_port_vsr_addr,
+	mvs_read_port_irq_stat,
+	mvs_write_port_irq_stat,
+	mvs_read_port_irq_mask,
+	mvs_write_port_irq_mask,
+	mvs_get_sas_addr,
+	mvs_64xx_command_active,
+	mvs_64xx_issue_stop,
+	mvs_start_delivery,
+	mvs_rx_update,
+	mvs_int_full,
+	mvs_64xx_assign_reg_set,
+	mvs_64xx_free_reg_set,
+	mvs_get_prd_size,
+	mvs_get_prd_count,
+	mvs_64xx_make_prd,
+	mvs_64xx_detect_porttype,
+	mvs_64xx_oob_done,
+	mvs_64xx_fix_phy_info,
+	mvs_64xx_phy_work_around,
+	mvs_64xx_phy_set_link_rate,
+	mvs_hw_max_link_rate,
+	mvs_64xx_phy_disable,
+	mvs_64xx_phy_enable,
+	mvs_64xx_phy_reset,
+	mvs_64xx_stp_reset,
+	mvs_64xx_clear_active_cmds,
+	mvs_64xx_spi_read_data,
+	mvs_64xx_spi_write_data,
+	mvs_64xx_spi_buildcmd,
+	mvs_64xx_spi_issuecmd,
+	mvs_64xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	mvs_64xx_fix_dma,
+#endif
+};
+
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644
index 000000000000..42e947d9795e
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_64xx.h
@@ -0,0 +1,151 @@
+/*
+ * Marvell 88SE64xx hardware specific header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS64XX_REG_H_
+#define _MVS64XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE		SAS_LINK_RATE_3_0_GBPS
+
+/* enhanced mode registers (BAR4) */
+enum hw_registers {
+	MVS_GBL_CTL		= 0x04,  /* global control */
+	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
+	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
+
+	MVS_PHY_CTL		= 0x40,  /* SOC PHY Control */
+	MVS_PORTS_IMP		= 0x9C,  /* SOC Port Implemented */
+
+	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
+
+	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
+	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
+	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
+	MVS_CMD_LIST_HI		= 0x10C,
+	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
+	MVS_RX_FIS_HI		= 0x114,
+
+	MVS_TX_CFG		= 0x120, /* TX configuration */
+	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
+	MVS_TX_HI		= 0x128,
+
+	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
+	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
+	MVS_RX_CFG		= 0x134, /* RX configuration */
+	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
+	MVS_RX_HI		= 0x13C,
+	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
+
+	MVS_INT_COAL		= 0x148, /* Int coalescing config */
+	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
+	MVS_INT_STAT		= 0x150, /* Central int status */
+	MVS_INT_MASK		= 0x154, /* Central int enable */
+	MVS_INT_STAT_SRS_0	= 0x158, /* SATA register set status */
+	MVS_INT_MASK_SRS_0	= 0x15C,
+
+	/* ports 1-3 follow after this */
+	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
+	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
+	/* ports 5-7 follow after this */
+	MVS_P4_INT_STAT		= 0x200, /* Port4 interrupt status */
+	MVS_P4_INT_MASK		= 0x204, /* Port4 interrupt enable mask */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
+	/* ports 5-7 follow after this */
+	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */
+
+	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
+	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
+	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
+	/* ports 5-7 follow after this */
+	MVS_P4_CFG_ADDR		= 0x230, /* Port4 config address */
+	MVS_P4_CFG_DATA		= 0x234, /* Port4 config data */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
+	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
+	/* ports 5-7 follow after this */
+	MVS_P4_VSR_ADDR		= 0x250, /* port4 VSR addr */
+	MVS_P4_VSR_DATA		= 0x254, /* port4 VSR data */
+};
+
+enum pci_cfg_registers {
+	PCR_PHY_CTL		= 0x40,
+	PCR_PHY_CTL2		= 0x90,
+	PCR_DEV_CTRL		= 0xE8,
+	PCR_LINK_STAT		= 0xF2,
+};
+
+/* SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+	VSR_PHY_STAT		= 0x00, /* Phy Status */
+	VSR_PHY_MODE1		= 0x01, /* phy tx */
+	VSR_PHY_MODE2		= 0x02, /* tx scc */
+	VSR_PHY_MODE3		= 0x03, /* pll */
+	VSR_PHY_MODE4		= 0x04, /* VCO */
+	VSR_PHY_MODE5		= 0x05, /* Rx */
+	VSR_PHY_MODE6		= 0x06, /* CDR */
+	VSR_PHY_MODE7		= 0x07, /* Impedance */
+	VSR_PHY_MODE8		= 0x08, /* Voltage */
+	VSR_PHY_MODE9		= 0x09, /* Test */
+	VSR_PHY_MODE10		= 0x0A, /* Power */
+	VSR_PHY_MODE11		= 0x0B, /* Phy Mode */
+	VSR_PHY_VS0		= 0x0C, /* Vendor Specific 0 */
+	VSR_PHY_VS1		= 0x0D, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
+	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+		(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+#define MAX_SG_ENTRY		64
+
+struct mvs_prd {
+	__le64	addr;		/* 64-bit buffer address */
+	__le32	reserved;
+	__le32	len;		/* 16-bit length */
+};
+
+#define SPI_CTRL_REG			0xc0
+#define SPI_CTRL_VENDOR_ENABLE		(1U<<29)
+#define SPI_CTRL_SPIRDY			(1U<<22)
+#define SPI_CTRL_SPISTART		(1U<<20)
+
+#define SPI_CMD_REG			0xc4
+#define SPI_DATA_REG			0xc8
+
+#define SPI_CTRL_REG_64XX		0x10
+#define SPI_CMD_REG_64XX		0x14
+#define SPI_DATA_REG_64XX		0x18
+
+#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644
index 000000000000..0940fae19d20
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -0,0 +1,672 @@
+/*
+ * Marvell 88SE94xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_94xx.h"
+#include "mv_chips.h"
+
+static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+	u32 reg;
+	struct mvs_phy *phy = &mvi->phy[i];
+	u32 phy_status;
+
+	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
+	reg = mvs_read_port_vsr_data(mvi, i);
+	phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
+	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+	switch (phy_status) {
+	case 0x10:
+		phy->phy_type |= PORT_TYPE_SAS;
+		break;
+	case 0x1d:
+	default:
+		phy->phy_type |= PORT_TYPE_SATA;
+		break;
+	}
+}
+
+static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	tmp = mr32(MVS_PCS);
+	tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+	mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+	u32 tmp;
+
+	tmp = mvs_read_port_irq_stat(mvi, phy_id);
+	tmp &= ~PHYEV_RDY_CH;
+	mvs_write_port_irq_stat(mvi, phy_id, tmp);
+	if (hard) {
+		tmp = mvs_read_phy_ctl(mvi, phy_id);
+		tmp |= PHY_RST_HARD;
+		mvs_write_phy_ctl(mvi, phy_id, tmp);
+		do {
+			tmp = mvs_read_phy_ctl(mvi, phy_id);
+		} while (tmp & PHY_RST_HARD);
+	} else {
+		mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
+		tmp = mvs_read_port_vsr_data(mvi, phy_id);
+		tmp |= PHY_RST;
+		mvs_write_port_vsr_data(mvi, phy_id, tmp);
+	}
+}
+
+static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+	u32 tmp;
+	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+	tmp = mvs_read_port_vsr_data(mvi, phy_id);
+	mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
+}
+
+static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+	mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
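+	/* the addr/data pairs here poke undocumented analog PHY registers;
+	 * the values look like vendor-tuned magic numbers
+	 */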
mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1); + mvs_write_port_vsr_addr(mvi, phy_id, 0x104); + mvs_write_port_vsr_data(mvi, phy_id, 0x00018080); + mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); + mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff); +} + +static int __devinit mvs_94xx_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + u32 tmp, cctl; + + mvs_show_pcie_usage(mvi); + if (mvi->flags & MVF_FLAG_SOC) { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_PHY_DSBL; + mw32(MVS_PHY_CTL, tmp); + } + + /* Init Chip */ + /* make sure RST is set; HBA_RST /should/ have done that for us */ + cctl = mr32(MVS_CTL) & 0xFFFF; + if (cctl & CCTL_RST) + cctl &= ~CCTL_RST; + else + mw32_f(MVS_CTL, cctl | CCTL_RST); + + if (mvi->flags & MVF_FLAG_SOC) { + tmp = mr32(MVS_PHY_CTL); + tmp &= ~PCTL_PWR_OFF; + tmp |= PCTL_COM_ON; + tmp &= ~PCTL_PHY_DSBL; + tmp |= PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + tmp &= ~PCTL_LINK_RST; + mw32(MVS_PHY_CTL, tmp); + msleep(100); + } + + /* reset control */ + mw32(MVS_PCS, 0); /* MVS_PCS */ + mw32(MVS_STP_REG_SET_0, 0); + mw32(MVS_STP_REG_SET_1, 0); + + /* init phys */ + mvs_phy_hacks(mvi); + + /* disable Multiplexing, enable phy implemented */ + mw32(MVS_PORTS_IMP, 0xFF); + + + mw32(MVS_PA_VSR_ADDR, 0x00000104); + mw32(MVS_PA_VSR_PORT, 0x00018080); + mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8); + mw32(MVS_PA_VSR_PORT, 0x0084ffff); + + /* set LED blink when IO*/ + mw32(MVS_PA_VSR_ADDR, 0x00000030); + tmp = mr32(MVS_PA_VSR_PORT); + tmp &= 0xFFFF00FF; + tmp |= 0x00003300; + mw32(MVS_PA_VSR_PORT, tmp); + + mw32(MVS_CMD_LIST_LO, mvi->slot_dma); + mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); + + mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); + mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); + + mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); + mw32(MVS_TX_LO, mvi->tx_dma); + mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); + + mw32(MVS_RX_CFG, MVS_RX_RING_SZ); + mw32(MVS_RX_LO, mvi->rx_dma); + mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); + + for (i = 0; i < mvi->chip->n_phy; i++) { + mvs_94xx_phy_disable(mvi, i); + /* set phy local SAS address */ + mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, + (mvi->phy[i].dev_sas_addr)); + + mvs_94xx_enable_xmt(mvi, i); + mvs_94xx_phy_enable(mvi, i); + + mvs_94xx_phy_reset(mvi, i, 1); + msleep(500); + mvs_94xx_detect_porttype(mvi, i); + } + + if (mvi->flags & MVF_FLAG_SOC) { + /* set select registers */ + writel(0x0E008000, regs + 0x000); + writel(0x59000008, regs + 0x004); + writel(0x20, regs + 0x008); + writel(0x20, regs + 0x00c); + writel(0x20, regs + 0x010); + writel(0x20, regs + 0x014); + writel(0x20, regs + 0x018); + writel(0x20, regs + 0x01c); + } + for (i = 0; i < mvi->chip->n_phy; i++) { + /* clear phy int status */ + tmp = mvs_read_port_irq_stat(mvi, i); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_stat(mvi, i, tmp); + + /* set phy int mask */ + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | + PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ; + mvs_write_port_irq_mask(mvi, i, tmp); + + msleep(100); + mvs_update_phyinfo(mvi, i, 1); + } + + /* FIXME: update wide port bitmaps */ + + /* little endian for open address and command table, etc. 
*/
+	/*
+	 * it seems that (from the spec) turning on big-endian won't
+	 * do us any good on big-endian machines; needs further
+	 * confirmation
+	 */
+	cctl = mr32(MVS_CTL);
+	cctl |= CCTL_ENDIAN_CMD;
+	cctl |= CCTL_ENDIAN_DATA;
+	cctl &= ~CCTL_ENDIAN_OPEN;
+	cctl |= CCTL_ENDIAN_RSP;
+	mw32_f(MVS_CTL, cctl);
+
+	/* reset CMD queue */
+	tmp = mr32(MVS_PCS);
+	tmp |= PCS_CMD_RST;
+	mw32(MVS_PCS, tmp);
+	/* interrupt coalescing may cause missed HW interrupts in some
+	 * cases, and the max coalescing count is 0x1ff while our max slot
+	 * count is 0x200, so disable it (count 0).
+	 */
+	tmp = 0;
+	mw32(MVS_INT_COAL, tmp);
+
+	tmp = 0x100;
+	mw32(MVS_INT_COAL_TMOUT, tmp);
+
+	/* ladies and gentlemen, start your engines */
+	mw32(MVS_TX_CFG, 0);
+	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+	/* enable CMD/CMPL_Q/RESP mode */
+	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
+		PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+	/* enable completion queue interrupt */
+	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+	      CINT_DMA_PCIE);
+	tmp |= CINT_PHY_MASK;
+	mw32(MVS_INT_MASK, tmp);
+
+	/* Enable SRS interrupt */
+	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+	return 0;
+}
+
+static int mvs_94xx_ioremap(struct mvs_info *mvi)
+{
+	if (!mvs_ioremap(mvi, 2, -1)) {
+		mvi->regs_ex = mvi->regs + 0x10200;
+		mvi->regs += 0x20000;
+		if (mvi->id == 1)
+			mvi->regs += 0x4000;
+		return 0;
+	}
+	return -1;
+}
+
+static void mvs_94xx_iounmap(struct mvs_info *mvi)
+{
+	if (mvi->regs) {
+		mvi->regs -= 0x20000;
+		if (mvi->id == 1)
+			mvi->regs -= 0x4000;
+		mvs_iounmap(mvi->regs);
+	}
+}
+
+static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 tmp;
+
+	tmp = mr32(MVS_GBL_CTL);
+	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+	mw32(MVS_GBL_INT_STAT, tmp);
+	writel(tmp, regs + 0x0C);
+	writel(tmp, regs + 0x10);
+	writel(tmp, regs + 0x14);
+	writel(tmp, regs + 0x18);
+	mw32(MVS_GBL_CTL, tmp);
+}
+
+static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 tmp;
+
+	tmp = mr32(MVS_GBL_CTL);
+
+	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+	mw32(MVS_GBL_INT_STAT, tmp);
+	writel(tmp, regs + 0x0C);
+	writel(tmp, regs + 0x10);
+	writel(tmp, regs + 0x14);
+	writel(tmp, regs + 0x18);
+	mw32(MVS_GBL_CTL, tmp);
+}
+
+static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
+{
+	void __iomem *regs = mvi->regs_ex;
+	u32 stat = 0;
+	if (!(mvi->flags & MVF_FLAG_SOC)) {
+		stat = mr32(MVS_GBL_INT_STAT);
+
+		if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
+			return 0;
+	}
+	return stat;
+}
+
+static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+	void __iomem *regs = mvi->regs;
+
+	if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
+			((stat & IRQ_SAS_B) && mvi->id == 1)) {
+		mw32_f(MVS_INT_STAT, CINT_DONE);
+	#ifndef MVS_USE_TASKLET
+		spin_lock(&mvi->lock);
+	#endif
+		mvs_int_full(mvi);
+	#ifndef MVS_USE_TASKLET
+		spin_unlock(&mvi->lock);
+	#endif
+	}
+	return IRQ_HANDLED;
+}
+
+static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+	u32 tmp;
+	mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
+	do {
+		tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
+	} while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+				u32 tfs)
+{
+	void __iomem *regs = mvi->regs;
+	u32 tmp;
+
+	if (type == PORT_TYPE_SATA) {
+		tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+		mw32(MVS_INT_STAT_SRS_0, tmp);
+	}
+	mw32(MVS_INT_STAT, CINT_CI_STOP);
+	tmp =
mr32(MVS_PCS) | 0xFF00; + mw32(MVS_PCS, tmp); +} + +static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + u8 reg_set = *tfs; + + if (*tfs == MVS_ID_NOT_MAPPED) + return; + + mvi->sata_reg_set &= ~bit(reg_set); + if (reg_set < 32) { + w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); + tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set; + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + } else { + w_reg_set_enable(reg_set, mvi->sata_reg_set); + tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set; + if (tmp) + mw32(MVS_INT_STAT_SRS_1, tmp); + } + + *tfs = MVS_ID_NOT_MAPPED; + + return; +} + +static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) +{ + int i; + void __iomem *regs = mvi->regs; + + if (*tfs != MVS_ID_NOT_MAPPED) + return 0; + + i = mv_ffc64(mvi->sata_reg_set); + if (i > 32) { + mvi->sata_reg_set |= bit(i); + w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); + *tfs = i; + return 0; + } else if (i >= 0) { + mvi->sata_reg_set |= bit(i); + w_reg_set_enable(i, (u32)mvi->sata_reg_set); + *tfs = i; + return 0; + } + return MVS_ID_NOT_MAPPED; +} + +static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd) +{ + int i; + struct scatterlist *sg; + struct mvs_prd *buf_prd = prd; + for_each_sg(scatter, sg, nr, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } +} + +static int mvs_94xx_oob_done(struct mvs_info *mvi, int i) +{ + u32 phy_st; + phy_st = mvs_read_phy_ctl(mvi, i); + if (phy_st & PHY_READY_MASK) /* phy ready */ + return 1; + return 0; +} + +static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id, + struct sas_identify_frame *id) +{ + int i; + u32 id_frame[7]; + + for (i = 0; i < 7; i++) { + mvs_write_port_cfg_addr(mvi, port_id, + CONFIG_ID_FRAME0 + i * 4); + id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); + } + memcpy(id, id_frame, 28); +} + +static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id, + struct sas_identify_frame *id) +{ + int i; + u32 id_frame[7]; + + /* mvs_hexdump(28, (u8 *)id_frame, 0); */ + for (i = 0; i < 7; i++) { + mvs_write_port_cfg_addr(mvi, port_id, + CONFIG_ATT_ID_FRAME0 + i * 4); + id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); + mv_dprintk("94xx phy %d atta frame %d %x.\n", + port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); + } + /* mvs_hexdump(28, (u8 *)id_frame, 0); */ + memcpy(id, id_frame, 28); +} + +static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id) +{ + u32 att_dev_info = 0; + + att_dev_info |= id->dev_type; + if (id->stp_iport) + att_dev_info |= PORT_DEV_STP_INIT; + if (id->smp_iport) + att_dev_info |= PORT_DEV_SMP_INIT; + if (id->ssp_iport) + att_dev_info |= PORT_DEV_SSP_INIT; + if (id->stp_tport) + att_dev_info |= PORT_DEV_STP_TRGT; + if (id->smp_tport) + att_dev_info |= PORT_DEV_SMP_TRGT; + if (id->ssp_tport) + att_dev_info |= PORT_DEV_SSP_TRGT; + + att_dev_info |= (u32)id->phy_id<<24; + return att_dev_info; +} + +static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id) +{ + return mvs_94xx_make_dev_info(id); +} + +static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i, + struct sas_identify_frame *id) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status); + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + sas_phy->linkrate 
+= 0x8; + mv_dprintk("get link rate is %d\n", sas_phy->linkrate); + phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; + phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; + mvs_94xx_get_dev_identify_frame(mvi, i, id); + phy->dev_info = mvs_94xx_make_dev_info(id); + + if (phy->phy_type & PORT_TYPE_SAS) { + mvs_94xx_get_att_identify_frame(mvi, i, id); + phy->att_dev_info = mvs_94xx_make_att_info(id); + phy->att_dev_sas_addr = *(u64 *)id->sas_addr; + } else { + phy->att_dev_info = PORT_DEV_STP_TRGT | 1; + } + +} + +void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates) +{ + /* TODO */ +} + +static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_STP_REG_SET_0); + mw32(MVS_STP_REG_SET_0, 0); + mw32(MVS_STP_REG_SET_0, tmp); + tmp = mr32(MVS_STP_REG_SET_1); + mw32(MVS_STP_REG_SET_1, 0); + mw32(MVS_STP_REG_SET_1, tmp); +} + + +u32 mvs_94xx_spi_read_data(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + return mr32(SPI_RD_DATA_REG_94XX); +} + +void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + mw32(SPI_RD_DATA_REG_94XX, data); +} + + +int mvs_94xx_spi_buildcmd(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + u32 dwTmp; + + dwTmp = ((u32)cmd << 8) | ((u32)length << 4); + if (read) + dwTmp |= SPI_CTRL_READ_94XX; + + if (addr != MV_MAX_U32) { + mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL)); + dwTmp |= SPI_ADDR_VLD_94XX; + } + + *dwCmd = dwTmp; + return 0; +} + + +int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX); + + return 0; +} + +int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) +{ + void __iomem *regs = mvi->regs_ex - 0x10200; + u32 i, dwTmp; + + for (i = 0; i < timeout; i++) { + dwTmp = mr32(SPI_CTRL_REG_94XX); + if (!(dwTmp & SPI_CTRL_SpiStart_94XX)) + return 0; + msleep(10); + } + + return -1; +} + +#ifndef DISABLE_HOTPLUG_DMA_FIX +void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) +{ + int i; + struct mvs_prd *buf_prd = prd; + buf_prd += from; + for (i = 0; i < MAX_SG_ENTRY - from; i++) { + buf_prd->addr = cpu_to_le64(buf_dma); + buf_prd->im_len.len = cpu_to_le32(buf_len); + ++buf_prd; + } +} +#endif + +const struct mvs_dispatch mvs_94xx_dispatch = { + "mv94xx", + mvs_94xx_init, + NULL, + mvs_94xx_ioremap, + mvs_94xx_iounmap, + mvs_94xx_isr, + mvs_94xx_isr_status, + mvs_94xx_interrupt_enable, + mvs_94xx_interrupt_disable, + mvs_read_phy_ctl, + mvs_write_phy_ctl, + mvs_read_port_cfg_data, + mvs_write_port_cfg_data, + mvs_write_port_cfg_addr, + mvs_read_port_vsr_data, + mvs_write_port_vsr_data, + mvs_write_port_vsr_addr, + mvs_read_port_irq_stat, + mvs_write_port_irq_stat, + mvs_read_port_irq_mask, + mvs_write_port_irq_mask, + mvs_get_sas_addr, + mvs_94xx_command_active, + mvs_94xx_issue_stop, + mvs_start_delivery, + mvs_rx_update, + mvs_int_full, + mvs_94xx_assign_reg_set, + mvs_94xx_free_reg_set, + mvs_get_prd_size, + mvs_get_prd_count, + mvs_94xx_make_prd, + mvs_94xx_detect_porttype, + mvs_94xx_oob_done, + mvs_94xx_fix_phy_info, + NULL, + mvs_94xx_phy_set_link_rate, + mvs_hw_max_link_rate, + mvs_94xx_phy_disable, + mvs_94xx_phy_enable, + mvs_94xx_phy_reset, + NULL, + mvs_94xx_clear_active_cmds, + mvs_94xx_spi_read_data, + mvs_94xx_spi_write_data, + 
mvs_94xx_spi_buildcmd,
+	mvs_94xx_spi_issuecmd,
+	mvs_94xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+	mvs_94xx_fix_dma,
+#endif
+};
+
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644
index 000000000000..23ed9b164669
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -0,0 +1,222 @@
+/*
+ * Marvell 88SE94xx hardware specific header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS94XX_REG_H_
+#define _MVS94XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE		SAS_LINK_RATE_6_0_GBPS
+
+enum hw_registers {
+	MVS_GBL_CTL		= 0x04,  /* global control */
+	MVS_GBL_INT_STAT	= 0x00,  /* global irq status */
+	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
+
+	MVS_PHY_CTL		= 0x40,  /* SOC PHY Control */
+	MVS_PORTS_IMP		= 0x9C,  /* SOC Port Implemented */
+
+	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */
+
+	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
+	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
+	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
+	MVS_CMD_LIST_HI		= 0x10C,
+	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
+	MVS_RX_FIS_HI		= 0x114,
+	MVS_STP_REG_SET_0	= 0x118, /* STP/SATA Register Set Enable */
+	MVS_STP_REG_SET_1	= 0x11C,
+	MVS_TX_CFG		= 0x120, /* TX configuration */
+	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
+	MVS_TX_HI		= 0x128,
+
+	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
+	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
+	MVS_RX_CFG		= 0x134, /* RX configuration */
+	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
+	MVS_RX_HI		= 0x13C,
+	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */
+
+	MVS_INT_COAL		= 0x148, /* Int coalescing config */
+	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
+	MVS_INT_STAT		= 0x150, /* Central int status */
+	MVS_INT_MASK		= 0x154, /* Central int enable */
+	MVS_INT_STAT_SRS_0	= 0x158, /* SATA register set status */
+	MVS_INT_MASK_SRS_0	= 0x15C,
+	MVS_INT_STAT_SRS_1	= 0x160,
+	MVS_INT_MASK_SRS_1	= 0x164,
+	MVS_NON_NCQ_ERR_0	= 0x168, /* SRS Non-specific NCQ Error */
+	MVS_NON_NCQ_ERR_1	= 0x16C,
+	MVS_CMD_ADDR		= 0x170, /* Command register port (addr) */
+	MVS_CMD_DATA		= 0x174, /* Command register port (data) */
+	MVS_MEM_PARITY_ERR	= 0x178, /* Memory parity error */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_INT_STAT		= 0x180, /* port0 interrupt status */
+	MVS_P0_INT_MASK		= 0x184, /* port0 interrupt mask */
+	/* ports 5-7 follow after this */
+	MVS_P4_INT_STAT		= 0x1A0, /* Port4 interrupt status */
+	MVS_P4_INT_MASK		= 0x1A4, /* Port4 interrupt enable mask */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_SER_CTLSTAT	= 0x1D0, /* port0 serial control/status */
+	/* ports 5-7 follow after this */
+	MVS_P4_SER_CTLSTAT	= 0x1E0, /* port4 serial control/status */
+
+	/* ports 1-3 follow after this */
+	MVS_P0_CFG_ADDR		= 0x200, /* port0 phy register address */
+	MVS_P0_CFG_DATA		= 0x204, /* port0 phy register data */
+	/* ports 5-7 follow after this */
+	MVS_P4_CFG_ADDR		= 0x220, /* Port4 config address */
+	MVS_P4_CFG_DATA		= 0x224, /* Port4 config data */
+
+	/* phys 1-3 follow after this */
+	MVS_P0_VSR_ADDR		= 0x250, /* phy0 VSR address */
+	MVS_P0_VSR_DATA		= 0x254, /* phy0 VSR data */
+	/* phys 1-3 follow after this */
+	/* multiplexing */
+	MVS_P4_VSR_ADDR		= 0x250, /* phy4 VSR address */
+	MVS_P4_VSR_DATA		= 0x254, /* phy4 VSR data */
+	MVS_PA_VSR_ADDR		= 0x290, /* All port VSR addr */
+	MVS_PA_VSR_PORT		= 0x294, /* All port VSR data */
+};
+
+enum pci_cfg_registers {
+	PCR_PHY_CTL		= 0x40,
+	PCR_PHY_CTL2		= 0x90,
+	PCR_DEV_CTRL		= 0x78,
+	PCR_LINK_STAT		= 0x82,
+};
+
+/* SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+	VSR_PHY_STAT		= 0x00 * 4, /* Phy Status */
+	VSR_PHY_MODE1		= 0x01 * 4, /* phy tx */
+	VSR_PHY_MODE2		= 0x02 * 4, /* tx scc */
+	VSR_PHY_MODE3		= 0x03 * 4, /* pll */
+	VSR_PHY_MODE4		= 0x04 * 4, /* VCO */
+	VSR_PHY_MODE5		= 0x05 * 4, /* Rx */
+	VSR_PHY_MODE6		= 0x06 * 4, /* CDR */
+	VSR_PHY_MODE7		= 0x07 * 4, /* Impedance */
+	VSR_PHY_MODE8		= 0x08 * 4, /* Voltage */
+	VSR_PHY_MODE9		= 0x09 * 4, /* Test */
+	VSR_PHY_MODE10		= 0x0A * 4, /* Power */
+	VSR_PHY_MODE11		= 0x0B * 4, /* Phy Mode */
+	VSR_PHY_VS0		= 0x0C * 4, /* Vendor Specific 0 */
+	VSR_PHY_VS1		= 0x0D * 4, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
+	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+		(0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+enum pci_interrupt_cause {
+	/* MAIN_IRQ_CAUSE (R10200) Bits */
+	IRQ_COM_IN_I2O_IOP0	= (1 << 0),
+	IRQ_COM_IN_I2O_IOP1	= (1 << 1),
+	IRQ_COM_IN_I2O_IOP2	= (1 << 2),
+	IRQ_COM_IN_I2O_IOP3	= (1 << 3),
+	IRQ_COM_OUT_I2O_HOS0	= (1 << 4),
+	IRQ_COM_OUT_I2O_HOS1	= (1 << 5),
+	IRQ_COM_OUT_I2O_HOS2	= (1 << 6),
+	IRQ_COM_OUT_I2O_HOS3	= (1 << 7),
+	IRQ_PCIF_TO_CPU_DRBL0	= (1 << 8),
+	IRQ_PCIF_TO_CPU_DRBL1	= (1 << 9),
+	IRQ_PCIF_TO_CPU_DRBL2	= (1 << 10),
+	IRQ_PCIF_TO_CPU_DRBL3	= (1 << 11),
+	IRQ_PCIF_DRBL0		= (1 << 12),
+	IRQ_PCIF_DRBL1		= (1 << 13),
+	IRQ_PCIF_DRBL2		= (1 << 14),
+	IRQ_PCIF_DRBL3		= (1 << 15),
+	IRQ_XOR_A		= (1 << 16),
+	IRQ_XOR_B		= (1 << 17),
+	IRQ_SAS_A		= (1 << 18),
+	IRQ_SAS_B		= (1 << 19),
+	IRQ_CPU_CNTRL		= (1 << 20),
+	IRQ_GPIO		= (1 << 21),
+	IRQ_UART		= (1 << 22),
+	IRQ_SPI			= (1 << 23),
+	IRQ_I2C			= (1 << 24),
+	IRQ_SGPIO		= (1 << 25),
+	IRQ_COM_ERR		= (1 << 29),
+	IRQ_I2O_ERR		= (1 << 30),
+	IRQ_PCIE_ERR		= (1 << 31),
+};
+
+#define MAX_SG_ENTRY		255
+
+struct mvs_prd_imt {
+	__le32			len:22;
+	u8			_r_a:2;
+	u8			misc_ctl:4;
+	u8			inter_sel:4;
+};
+
+struct mvs_prd {
+	/* 64-bit buffer address */
+	__le64			addr;
+	/* 22-bit length */
+	struct mvs_prd_imt	im_len;
+} __attribute__ ((packed));
+
+#define SPI_CTRL_REG_94XX		0xc800
+#define SPI_ADDR_REG_94XX		0xc804
+#define SPI_WR_DATA_REG_94XX		0xc808
+#define SPI_RD_DATA_REG_94XX		0xc80c
+#define SPI_CTRL_READ_94XX		(1U << 2)
+#define SPI_ADDR_VLD_94XX		(1U << 1)
+#define SPI_CTRL_SpiStart_94XX		(1U << 0)
+
+#define mv_ffc(x)		ffz(x)
+
+static inline int
+mv_ffc64(u64 v)
+{
+	int i;
+	i = mv_ffc((u32)v);
+	if (i >= 0)
+		return i;
+	i = mv_ffc((u32)(v>>32));
+
+	if (i != 0)
+		return 32 + i;
+
+	return -1;
+}
+
+#define r_reg_set_enable(i) \
+	(((i) > 31) ?
mr32(MVS_STP_REG_SET_1) : \
+	mr32(MVS_STP_REG_SET_0))
+
+#define w_reg_set_enable(i, tmp) \
+	(((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
+	mw32(MVS_STP_REG_SET_0, tmp))
+
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+#endif
+
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644
index 000000000000..a67e1c4172f9
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -0,0 +1,280 @@
+/*
+ * Marvell 88SE64xx/88SE94xx register IO interface
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#ifndef _MV_CHIPS_H_
+#define _MV_CHIPS_H_
+
+#define mr32(reg)		readl(regs + reg)
+#define mw32(reg, val)		writel((val), regs + reg)
+#define mw32_f(reg, val)	do {			\
+					mw32(reg, val);	\
+					mr32(reg);	\
+				} while (0)
+
+#define iow32(reg, val)		outl(val, (unsigned long)(regs + reg))
+#define ior32(reg)		inl((unsigned long)(regs + reg))
+#define iow16(reg, val)		outw(val, (unsigned long)(regs + reg))
+#define ior16(reg)		inw((unsigned long)(regs + reg))
+#define iow8(reg, val)		outb(val, (unsigned long)(regs + reg))
+#define ior8(reg)		inb((unsigned long)(regs + reg))
+
+static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
+{
+	void __iomem *regs = mvi->regs;
+	mw32(MVS_CMD_ADDR, addr);
+	return mr32(MVS_CMD_DATA);
+}
+
+static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
+{
+	void __iomem *regs = mvi->regs;
+	mw32(MVS_CMD_ADDR, addr);
+	mw32(MVS_CMD_DATA, val);
+}
+
+static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
+{
+	void __iomem *regs = mvi->regs;
+	return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
+		mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
+}
+
+static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
+{
+	void __iomem *regs = mvi->regs;
+	if (port < 4)
+		mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
+	else
+		mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
+}
+
+static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
+				u32 off2, u32 port)
+{
+	void __iomem *regs = mvi->regs + off;
+	void __iomem *regs2 = mvi->regs + off2;
+	return (port < 4) ?
readl(regs + port * 8) : + readl(regs2 + (port - 4) * 8); +} + +static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, + u32 port, u32 val) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + if (port < 4) + writel(val, regs + port * 8); + else + writel(val, regs2 + (port - 4) * 8); +} + +static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_CFG_DATA, + MVS_P4_CFG_DATA, port); +} + +static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, + u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_CFG_DATA, + MVS_P4_CFG_DATA, port, val); +} + +static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, + u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_CFG_ADDR, + MVS_P4_CFG_ADDR, port, addr); + mdelay(10); +} + +static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_VSR_DATA, + MVS_P4_VSR_DATA, port); +} + +static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, + u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_VSR_DATA, + MVS_P4_VSR_DATA, port, val); +} + +static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, + u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_VSR_ADDR, + MVS_P4_VSR_ADDR, port, addr); + mdelay(10); +} + +static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_STAT, + MVS_P4_INT_STAT, port); +} + +static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, + u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_STAT, + MVS_P4_INT_STAT, port, val); +} + +static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_MASK, + MVS_P4_INT_MASK, port); + +} + +static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, + u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_MASK, + MVS_P4_INT_MASK, port, val); +} + +static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi) +{ + u32 tmp; + + /* workaround for SATA R-ERR, to ignore phy glitch */ + tmp = mvs_cr32(mvi, CMD_PHY_TIMER); + tmp &= ~(1 << 9); + tmp |= (1 << 10); + mvs_cw32(mvi, CMD_PHY_TIMER, tmp); + + /* enable retry 127 times */ + mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f); + + /* extend open frame timeout to max */ + tmp = mvs_cr32(mvi, CMD_SAS_CTL0); + tmp &= ~0xffff; + tmp |= 0x3fff; + mvs_cw32(mvi, CMD_SAS_CTL0, tmp); + + /* workaround for WDTIMEOUT , set to 550 ms */ + mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); + + /* not to halt for different port op during wideport link change */ + mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); + + /* workaround for Seagate disk not-found OOB sequence, recv + * COMINIT before sending out COMWAKE */ + tmp = mvs_cr32(mvi, CMD_PHY_MODE_21); + tmp &= 0x0000ffff; + tmp |= 0x00fa0000; + mvs_cw32(mvi, CMD_PHY_MODE_21, tmp); + + tmp = mvs_cr32(mvi, CMD_PHY_TIMER); + tmp &= 0x1fffffff; + tmp |= (2U << 29); /* 8 ms retry */ + mvs_cw32(mvi, CMD_PHY_TIMER, tmp); +} + +static inline void mvs_int_sata(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(MVS_INT_STAT_SRS_0); + if (tmp) + mw32(MVS_INT_STAT_SRS_0, tmp); + MVS_CHIP_DISP->clear_active_cmds(mvi); +} + +static inline void mvs_int_full(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp, stat; + int i; + + stat = mr32(MVS_INT_STAT); + mvs_int_rx(mvi, false); + + for (i = 0; i < mvi->chip->n_phy; i++) { + tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); + if (tmp) + 
mvs_int_port(mvi, i, tmp);
+	}
+
+	if (stat & CINT_SRS)
+		mvs_int_sata(mvi);
+
+	mw32(MVS_INT_STAT, stat);
+}
+
+static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
+{
+	void __iomem *regs = mvi->regs;
+	mw32(MVS_TX_PROD_IDX, tx);
+}
+
+static inline u32 mvs_rx_update(struct mvs_info *mvi)
+{
+	void __iomem *regs = mvi->regs;
+	return mr32(MVS_RX_CONS_IDX);
+}
+
+static inline u32 mvs_get_prd_size(void)
+{
+	return sizeof(struct mvs_prd);
+}
+
+static inline u32 mvs_get_prd_count(void)
+{
+	return MAX_SG_ENTRY;
+}
+
+static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
+{
+	u16 link_stat, link_spd;
+	const char *spd[] = {
+		"Unknown",
+		"2.5",
+		"5.0",
+	};
+	if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
+		return;
+
+	pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
+	link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
+	if (link_spd >= 3)
+		link_spd = 0;
+	dev_printk(KERN_INFO, mvi->dev,
+		"mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
+		(link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
+		spd[link_spd]);
+}
+
+static inline u32 mvs_hw_max_link_rate(void)
+{
+	return MAX_LINK_RATE;
+}
+
+#endif /* _MV_CHIPS_H_ */
+
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644
index 000000000000..f8cb9defb961
--- /dev/null
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -0,0 +1,502 @@
+/*
+ * Marvell 88SE64xx/88SE94xx const header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_DEFS_H_
+#define _MV_DEFS_H_
+
+
+enum chip_flavors {
+	chip_6320,
+	chip_6440,
+	chip_6485,
+	chip_9480,
+	chip_9180,
+};
+
+/* driver compile-time configuration */
+enum driver_configuration {
+	MVS_SLOTS		= 512,	/* command slots */
+	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
+	MVS_RX_RING_SZ		= 1024,	/* RX ring size (12-bit) */
+					/* software requires power-of-2
+					   ring size */
+	MVS_SOC_SLOTS		= 64,
+	MVS_SOC_TX_RING_SZ	= MVS_SOC_SLOTS * 2,
+	MVS_SOC_RX_RING_SZ	= MVS_SOC_SLOTS * 2,
+
+	MVS_SLOT_BUF_SZ		= 8192,	/* cmd tbl + IU + status + PRD */
+	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
+	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
+	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */
+	MVS_QUEUE_SIZE		= 32,	/* Support Queue depth */
+	MVS_CAN_QUEUE		= MVS_SLOTS - 2,	/* SCSI Queue depth */
+	MVS_SOC_CAN_QUEUE	= MVS_SOC_SLOTS - 2,
+};
+
+/* unchangeable hardware details */
+enum hardware_details {
+	MVS_MAX_PHYS		= 8,	/* max. possible phys */
+	MVS_MAX_PORTS		= 8,	/* max. possible ports */
+	MVS_SOC_PHYS		= 4,	/* soc phys */
+	MVS_SOC_PORTS		= 4,	/* soc ports */
+	MVS_MAX_DEVICES		= 1024,	/* max supported device */
+};
+
+/* peripheral registers (BAR2) */
+enum peripheral_registers {
+	SPI_CTL			= 0x10,	/* EEPROM control */
+	SPI_CMD			= 0x14,	/* EEPROM command */
+	SPI_DATA		= 0x18,	/* EEPROM data */
+};
+
+enum peripheral_register_bits {
+	TWSI_RDY		= (1U << 7),	/* EEPROM interface ready */
+	TWSI_RD			= (1U << 4),	/* EEPROM read access */
+
+	SPI_ADDR_MASK		= 0x3ffff,	/* bits 17:0 */
+};
+
+enum hw_register_bits {
+	/* MVS_GBL_CTL */
+	INT_EN			= (1U << 1),	/* Global int enable */
+	HBA_RST			= (1U << 0),	/* HBA reset */
+
+	/* MVS_GBL_INT_STAT */
+	INT_XOR			= (1U << 4),	/* XOR engine event */
+	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */
+
+	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
+	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
+	MODE_AUTO_DET_PORT7	= (1U << 15),	/* port0 SAS/SATA autodetect */
+	MODE_AUTO_DET_PORT6	= (1U << 14),
+	MODE_AUTO_DET_PORT5	= (1U << 13),
+	MODE_AUTO_DET_PORT4	= (1U << 12),
+	MODE_AUTO_DET_PORT3	= (1U << 11),
+	MODE_AUTO_DET_PORT2	= (1U << 10),
+	MODE_AUTO_DET_PORT1	= (1U << 9),
+	MODE_AUTO_DET_PORT0	= (1U << 8),
+	MODE_AUTO_DET_EN	= MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
+				  MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
+				  MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
+				  MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
+	MODE_SAS_PORT7_MASK	= (1U << 7),	/* port0 SAS(1), SATA(0) mode */
+	MODE_SAS_PORT6_MASK	= (1U << 6),
+	MODE_SAS_PORT5_MASK	= (1U << 5),
+	MODE_SAS_PORT4_MASK	= (1U << 4),
+	MODE_SAS_PORT3_MASK	= (1U << 3),
+	MODE_SAS_PORT2_MASK	= (1U << 2),
+	MODE_SAS_PORT1_MASK	= (1U << 1),
+	MODE_SAS_PORT0_MASK	= (1U << 0),
+	MODE_SAS_SATA		= MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
+				  MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
+				  MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
+				  MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
+
+	/* SAS_MODE value may be
+	 * dictated (in hw) by values
+	 * of SATA_TARGET & AUTO_DET
+	 */
+
+	/* MVS_TX_CFG */
+	TX_EN			= (1U << 16),	/* Enable TX */
+	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */
+
+	/* MVS_RX_CFG */
+	RX_EN			= (1U << 16),	/* Enable RX */
+	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */
+
+	/* MVS_INT_COAL */
+	COAL_EN			= (1U << 16),	/* Enable int coalescing */
+
+	/* MVS_INT_STAT, MVS_INT_MASK */
+	CINT_I2C		= (1U << 31),	/* I2C event */
+	CINT_SW0		= (1U << 30),	/* software event 0 */
+	CINT_SW1		= (1U << 29),	/* software event 1 */
+	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
+	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
+	CINT_MEM		= (1U << 26),	/* int mem parity err */
+	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
+	CINT_SRS		= (1U << 3),	/* SRS event */
+	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
+	CINT_DONE		= (1U << 0),	/* cmd completion */
+
+	/* shl for ports 1-3 */
+	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
+	CINT_PORT		= (1U << 8),	/* port0 event */
+	CINT_PORT_MASK_OFFSET	= 8,
+	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),
+	CINT_PHY_MASK_OFFSET	= 4,
+	CINT_PHY_MASK		= (0x0F << CINT_PHY_MASK_OFFSET),
+
+	/* TX (delivery) ring bits */
+	TXQ_CMD_SHIFT		= 29,
+	TXQ_CMD_SSP		= 1,		/* SSP protocol */
+	TXQ_CMD_SMP		= 2,		/* SMP protocol */
+	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
+	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
+	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
+	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
+	TXQ_MODE_TARGET		= 0,
+	TXQ_MODE_INITIATOR	= 1,
+	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
+	TXQ_PRI_NORMAL		= 0,
+	TXQ_PRI_HIGH		= 1,
+	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
+	TXQ_SRS_MASK		= 0x7f,
+	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
+	TXQ_PHY_MASK		= 0xff,
+	TXQ_SLOT_MASK		= 0xfff,	/* slot number */
+
+	/* RX (completion) ring bits */
+	RXQ_GOOD		= (1U << 23),	/* Response good */
+	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
+	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
+	RXQ_ATTN		= (1U << 19),	/* attention */
+	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
+	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
+	RXQ_DONE		= (1U << 16),	/* cmd complete */
+	RXQ_SLOT_MASK		= 0xfff,	/* slot number */
+
+	/* mvs_cmd_hdr bits */
+	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
+	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */
+
+	/* SSP initiator only */
+	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */
+
+	/* SSP initiator or target */
+	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */
+
+	/* SSP target only */
+	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
+	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
+	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
+	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */
+
+	MCH_SSP_MODE_PASSTHRU	= 1,
+	MCH_SSP_MODE_NORMAL	= 0,
+	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
+	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
+	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
+	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
+	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
+	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
+	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
+	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
+	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
+	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA)*/
+
+	CCTL_RST		= (1U << 5),	/* port logic reset */
+
+						/* 0(LSB first), 1(MSB first) */
+	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
+	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
+	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
+	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */
+
+	/* MVS_Px_SER_CTLSTAT (per-phy control) */
+	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
+	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
+	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
+	PHY_RST			= (1U << 0),	/* phy reset */
+	PHY_READY_MASK		= (1U << 20),
+
+	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
+	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
+	PHYEV_DCDR_ERR		= (1U << 23),	/* STP Decoder Error */
+	PHYEV_CRC_ERR		= (1U << 22),	/* STP CRC Error */
+	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
+	PHYEV_AN		= (1U << 18),	/* SATA async notification */
+	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
+	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
+	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
+	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
+	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
+	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
+	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
+	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
+	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
+	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
+	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
+	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
+	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
+	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
+	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */
+
+	/* MVS_PCS */
+	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA
Register Set */ + PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ + PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */ + PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ + PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ + PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */ + PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ + PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ + PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ + PCS_CMD_RST = (1U << 1), /* reset cmd issue */ + PCS_CMD_EN = (1U << 0), /* enable cmd issue */ + + /* Port n Attached Device Info */ + PORT_DEV_SSP_TRGT = (1U << 19), + PORT_DEV_SMP_TRGT = (1U << 18), + PORT_DEV_STP_TRGT = (1U << 17), + PORT_DEV_SSP_INIT = (1U << 11), + PORT_DEV_SMP_INIT = (1U << 10), + PORT_DEV_STP_INIT = (1U << 9), + PORT_PHY_ID_MASK = (0xFFU << 24), + PORT_SSP_TRGT_MASK = (0x1U << 19), + PORT_SSP_INIT_MASK = (0x1U << 11), + PORT_DEV_TRGT_MASK = (0x7U << 17), + PORT_DEV_INIT_MASK = (0x7U << 9), + PORT_DEV_TYPE_MASK = (0x7U << 0), + + /* Port n PHY Status */ + PHY_RDY = (1U << 2), + PHY_DW_SYNC = (1U << 1), + PHY_OOB_DTCTD = (1U << 0), + + /* VSR */ + /* PHYMODE 6 (CDB) */ + PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ + PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ + PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ + PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ + PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ + PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ + PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */ + PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ + PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ + PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ + PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ + PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ + PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ + PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ +}; + +/* SAS/SATA configuration port registers, aka phy registers */ +enum sas_sata_config_port_regs { + PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ + PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ + PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ + PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ + PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ + PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ + PHYR_SATA_CTL = 0x18, /* SATA control */ + PHYR_PHY_STAT = 0x1C, /* PHY status */ + PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ + PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ + PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ + PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ + PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ + PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ + PHYR_WIDE_PORT = 0x38, /* wide port participating */ + PHYR_CURRENT0 = 0x80, /* current connection info 0 */ + PHYR_CURRENT1 = 0x84, /* current connection info 1 */ + PHYR_CURRENT2 = 0x88, /* current connection info 2 */ + CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */ + CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */ + CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */ + CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */ + CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */ + CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 
*/
+	CONFIG_ID_FRAME6	= 0x118,	/* Port device ID frame register 6 */
+	CONFIG_ATT_ID_FRAME0	= 0x11c,	/* attached ID frame register 0 */
+	CONFIG_ATT_ID_FRAME1	= 0x120,	/* attached ID frame register 1 */
+	CONFIG_ATT_ID_FRAME2	= 0x124,	/* attached ID frame register 2 */
+	CONFIG_ATT_ID_FRAME3	= 0x128,	/* attached ID frame register 3 */
+	CONFIG_ATT_ID_FRAME4	= 0x12c,	/* attached ID frame register 4 */
+	CONFIG_ATT_ID_FRAME5	= 0x130,	/* attached ID frame register 5 */
+	CONFIG_ATT_ID_FRAME6	= 0x134,	/* attached ID frame register 6 */
+};
+
+enum sas_cmd_port_registers {
+	CMD_CMRST_OOB_DET	= 0x100,	/* COMRESET OOB detect register */
+	CMD_CMWK_OOB_DET	= 0x104,	/* COMWAKE OOB detect register */
+	CMD_CMSAS_OOB_DET	= 0x108,	/* COMSAS OOB detect register */
+	CMD_BRST_OOB_DET	= 0x10c,	/* burst OOB detect register */
+	CMD_OOB_SPACE		= 0x110,	/* OOB space control register */
+	CMD_OOB_BURST		= 0x114,	/* OOB burst control register */
+	CMD_PHY_TIMER		= 0x118,	/* PHY timer control register */
+	CMD_PHY_CONFIG0		= 0x11c,	/* PHY config register 0 */
+	CMD_PHY_CONFIG1		= 0x120,	/* PHY config register 1 */
+	CMD_SAS_CTL0		= 0x124,	/* SAS control register 0 */
+	CMD_SAS_CTL1		= 0x128,	/* SAS control register 1 */
+	CMD_SAS_CTL2		= 0x12c,	/* SAS control register 2 */
+	CMD_SAS_CTL3		= 0x130,	/* SAS control register 3 */
+	CMD_ID_TEST		= 0x134,	/* ID test register */
+	CMD_PL_TIMER		= 0x138,	/* PL timer register */
+	CMD_WD_TIMER		= 0x13c,	/* WD timer register */
+	CMD_PORT_SEL_COUNT	= 0x140,	/* port selector count register */
+	CMD_APP_MEM_CTL		= 0x144,	/* Application Memory Control */
+	CMD_XOR_MEM_CTL		= 0x148,	/* XOR Block Memory Control */
+	CMD_DMA_MEM_CTL		= 0x14c,	/* DMA Block Memory Control */
+	CMD_PORT_MEM_CTL0	= 0x150,	/* Port Memory Control 0 */
+	CMD_PORT_MEM_CTL1	= 0x154,	/* Port Memory Control 1 */
+	CMD_SATA_PORT_MEM_CTL0	= 0x158,	/* SATA Port Memory Control 0 */
+	CMD_SATA_PORT_MEM_CTL1	= 0x15c,	/* SATA Port Memory Control 1 */
+	CMD_XOR_MEM_BIST_CTL	= 0x160,	/* XOR Memory BIST Control */
+	CMD_XOR_MEM_BIST_STAT	= 0x164,	/* XOR Memory BIST Status */
+	CMD_DMA_MEM_BIST_CTL	= 0x168,	/* DMA Memory BIST Control */
+	CMD_DMA_MEM_BIST_STAT	= 0x16c,	/* DMA Memory BIST Status */
+	CMD_PORT_MEM_BIST_CTL	= 0x170,	/* Port Memory BIST Control */
+	CMD_PORT_MEM_BIST_STAT0 = 0x174,	/* Port Memory BIST Status 0 */
+	CMD_PORT_MEM_BIST_STAT1 = 0x178,	/* Port Memory BIST Status 1 */
+	CMD_STP_MEM_BIST_CTL	= 0x17c,	/* STP Memory BIST Control */
+	CMD_STP_MEM_BIST_STAT0	= 0x180,	/* STP Memory BIST Status 0 */
+	CMD_STP_MEM_BIST_STAT1	= 0x184,	/* STP Memory BIST Status 1 */
+	CMD_RESET_COUNT		= 0x188,	/* Reset Count */
+	CMD_MONTR_DATA_SEL	= 0x18C,	/* Monitor Data/Select */
+	CMD_PLL_PHY_CONFIG	= 0x190,	/* PLL/PHY Configuration */
+	CMD_PHY_CTL		= 0x194,	/* PHY Control and Status */
+	CMD_PHY_TEST_COUNT0	= 0x198,	/* Phy Test Count 0 */
+	CMD_PHY_TEST_COUNT1	= 0x19C,	/* Phy Test Count 1 */
+	CMD_PHY_TEST_COUNT2	= 0x1A0,	/* Phy Test Count 2 */
+	CMD_APP_ERR_CONFIG	= 0x1A4,	/* Application Error Configuration */
+	CMD_PND_FIFO_CTL0	= 0x1A8,	/* Pending FIFO Control 0 */
+	CMD_HOST_CTL		= 0x1AC,	/* Host Control Status */
+	CMD_HOST_WR_DATA	= 0x1B0,	/* Host Write Data */
+	CMD_HOST_RD_DATA	= 0x1B4,	/* Host Read Data */
+	CMD_PHY_MODE_21		= 0x1B8,	/* Phy Mode 21 */
+	CMD_SL_MODE0		= 0x1BC,	/* SL Mode 0 */
+	CMD_SL_MODE1		= 0x1C0,	/* SL Mode 1 */
+	CMD_PND_FIFO_CTL1	= 0x1C4,	/* Pending FIFO Control 1 */
+};
+
+enum mvs_info_flags {
+	MVF_MSI			= (1U << 0),	/* MSI is enabled */
+	MVF_PHY_PWR_FIX		= (1U << 1),	/* bug workaround */
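+	/* MVF_FLAG_SOC below selects the SoC (non-PCI) code paths
+	 * used throughout the chip setup routines
+	 */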
MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ +}; + +enum mvs_event_flags { + PHY_PLUG_EVENT = (3U), + PHY_PLUG_IN = (1U << 0), /* phy plug in */ + PHY_PLUG_OUT = (1U << 1), /* phy plug out */ +}; + +enum mvs_port_type { + PORT_TGT_MASK = (1U << 5), + PORT_INIT_PORT = (1U << 4), + PORT_TGT_PORT = (1U << 3), + PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT), + PORT_TYPE_SAS = (1U << 1), + PORT_TYPE_SATA = (1U << 0), +}; + +/* Command Table Format */ +enum ct_format { + /* SSP */ + SSP_F_H = 0x00, + SSP_F_IU = 0x18, + SSP_F_MAX = 0x4D, + /* STP */ + STP_CMD_FIS = 0x00, + STP_ATAPI_CMD = 0x40, + STP_F_MAX = 0x10, + /* SMP */ + SMP_F_T = 0x00, + SMP_F_DEP = 0x01, + SMP_F_MAX = 0x101, +}; + +enum status_buffer { + SB_EIR_OFF = 0x00, /* Error Information Record */ + SB_RFB_OFF = 0x08, /* Response Frame Buffer */ + SB_RFB_MAX = 0x400, /* RFB size*/ +}; + +enum error_info_rec { + CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ + CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */ + RSP_OVER = (1U << 29), /* rsp buffer overflow */ + RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ + UNK_FIS = (1U << 27), /* unknown FIS */ + DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ + SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ + TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ + R_ERR = (1U << 23), /* SATA returned R_ERR prim */ + RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ + XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ + UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ + DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ + INTERLOCK = (1U << 15), /* interlock error */ + NAK = (1U << 14), /* NAK rx'd */ + ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ + CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ + OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ + PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ + NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ + STP_RES_BSY = (1U << 8), /* STP resources busy */ + BREAK = (1U << 7), /* break received */ + BAD_DEST = (1U << 6), /* bad destination */ + BAD_PROTO = (1U << 5), /* protocol not supported */ + BAD_RATE = (1U << 4), /* cxn rate not supported */ + WRONG_DEST = (1U << 3), /* wrong destination error */ + CREDIT_TO = (1U << 2), /* credit timeout */ + WDOG_TO = (1U << 1), /* watchdog timeout */ + BUF_PAR = (1U << 0), /* buffer parity error */ +}; + +enum error_info_rec_2 { + SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ + GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ + APP_CHK_ERR = (1U << 13), /* Application Check error */ + REF_CHK_ERR = (1U << 12), /* Reference Check Error */ + USR_BLK_NM = (1U << 0), /* User Block Number */ +}; + +enum pci_cfg_register_bits { + PCTL_PWR_OFF = (0xFU << 24), + PCTL_COM_ON = (0xFU << 20), + PCTL_LINK_RST = (0xFU << 16), + PCTL_LINK_OFFS = (16), + PCTL_PHY_DSBL = (0xFU << 12), + PCTL_PHY_DSBL_OFFS = (12), + PRD_REQ_SIZE = (0x4000), + PRD_REQ_MASK = (0x00007000), + PLS_NEG_LINK_WD = (0x3FU << 4), + PLS_NEG_LINK_WD_OFFS = 4, + PLS_LINK_SPD = (0x0FU << 0), + PLS_LINK_SPD_OFFS = 0, +}; + +enum open_frame_protocol { + PROTOCOL_SMP = 0x0, + PROTOCOL_SSP = 0x1, + PROTOCOL_STP = 0x2, +}; + +/* define for response frame datapres field */ +enum datapres_field { + NO_DATA = 0, + RESPONSE_DATA = 1, + SENSE_DATA = 2, +}; + +/* define task management IU */ +struct mvs_tmf_task{ + u8 tmf; + u16 tag_of_task_to_be_managed; +}; +#endif diff --git a/drivers/scsi/mvsas/mv_init.c 
b/drivers/scsi/mvsas/mv_init.c new file mode 100644 index 000000000000..8646a19f999d --- /dev/null +++ b/drivers/scsi/mvsas/mv_init.c @@ -0,0 +1,703 @@ +/* + * Marvell 88SE64xx/88SE94xx pci init + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. <kewei@marvell.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + + +#include "mv_sas.h" + +static struct scsi_transport_template *mvs_stt; +static const struct mvs_chip_info mvs_chips[] = { + [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, + [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, + [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, + [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, + [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, +}; + +#define SOC_SAS_NUM 2 + +static struct scsi_host_template mvs_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = mvs_slave_configure, + .slave_destroy = sas_slave_destroy, + .scan_finished = mvs_scan_finished, + .scan_start = mvs_scan_start, + .change_queue_depth = sas_change_queue_depth, + .change_queue_type = sas_change_queue_type, + .bios_param = sas_bios_param, + .can_queue = 1, + .cmd_per_lun = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .slave_alloc = mvs_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +}; + +static struct sas_domain_function_template mvs_transport_ops = { + .lldd_dev_found = mvs_dev_found, + .lldd_dev_gone = mvs_dev_gone, + + .lldd_execute_task = mvs_queue_command, + .lldd_control_phy = mvs_phy_control, + + .lldd_abort_task = mvs_abort_task, + .lldd_abort_task_set = mvs_abort_task_set, + .lldd_clear_aca = mvs_clear_aca, + .lldd_clear_task_set = mvs_clear_task_set, + .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, + .lldd_lu_reset = mvs_lu_reset, + .lldd_query_task = mvs_query_task, + + .lldd_port_formed = mvs_port_formed, + .lldd_port_deformed = mvs_port_deformed, + +}; + +static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) +{ + struct mvs_phy *phy = &mvi->phy[phy_id]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + phy->mvi = mvi; + init_timer(&phy->timer); + sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; + sas_phy->class = SAS; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->type = PHY_TYPE_PHYSICAL; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + + sas_phy->id = phy_id; + sas_phy->sas_addr = &mvi->sas_addr[0]; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata; + sas_phy->lldd_phy = phy; +} + +static void mvs_free(struct mvs_info *mvi) +{ + int i; + struct mvs_wq *mwq; + int slot_nr; + + if (!mvi) + return; + + if (mvi->flags & MVF_FLAG_SOC) + slot_nr = MVS_SOC_SLOTS; + else + slot_nr = MVS_SLOTS; + + for (i = 0; i < mvi->tags_num; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; + if (slot->buf) + dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ, + slot->buf, slot->buf_dma); + } + + if (mvi->tx) + dma_free_coherent(mvi->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + mvi->tx, mvi->tx_dma); + if (mvi->rx_fis) + dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ, + mvi->rx_fis, mvi->rx_fis_dma); + if (mvi->rx) + dma_free_coherent(mvi->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + mvi->rx, mvi->rx_dma); + if (mvi->slot) + dma_free_coherent(mvi->dev, + sizeof(*mvi->slot) * slot_nr, + mvi->slot, mvi->slot_dma); +#ifndef DISABLE_HOTPLUG_DMA_FIX + if (mvi->bulk_buffer) + dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, + mvi->bulk_buffer, mvi->bulk_buffer_dma); +#endif + + MVS_CHIP_DISP->chip_iounmap(mvi); + if (mvi->shost) + scsi_host_put(mvi->shost); + list_for_each_entry(mwq, &mvi->wq_list, entry) + cancel_delayed_work(&mwq->work_q); + kfree(mvi); +} + +#ifdef MVS_USE_TASKLET +struct tasklet_struct mv_tasklet; +static void mvs_tasklet(unsigned long opaque) +{ + unsigned long flags; + u32 stat; + u16 core_nr, i = 0; + + struct mvs_info *mvi; + struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + + if (unlikely(!mvi)) + BUG_ON(1); + + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq); + if (stat) + MVS_CHIP_DISP->isr(mvi, mvi->irq, stat); + } + +} +#endif + +static irqreturn_t mvs_interrupt(int irq, void *opaque) +{ + u32 core_nr, i = 0; + u32 stat; + struct mvs_info *mvi; + struct sas_ha_struct *sha = opaque; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + + if (unlikely(!mvi)) + return IRQ_NONE; + + stat = MVS_CHIP_DISP->isr_status(mvi, irq); + if (!stat) + return IRQ_NONE; + +#ifdef MVS_USE_TASKLET + tasklet_schedule(&mv_tasklet); +#else + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + MVS_CHIP_DISP->isr(mvi, irq, stat); + } +#endif + return IRQ_HANDLED; +} + +static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) +{ + int i, slot_nr; + + if (mvi->flags & MVF_FLAG_SOC) + slot_nr = MVS_SOC_SLOTS; + else + slot_nr = MVS_SLOTS; + + spin_lock_init(&mvi->lock); + for (i = 0; i < mvi->chip->n_phy; i++) { + mvs_phy_init(mvi, i); + mvi->port[i].wide_port_phymap = 0; + mvi->port[i].port_attached = 0; + INIT_LIST_HEAD(&mvi->port[i].list); + } + for (i = 0; i < MVS_MAX_DEVICES; i++) { + mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; + mvi->devices[i].dev_type = NO_DEVICE; + mvi->devices[i].device_id = i; + mvi->devices[i].dev_status = MVS_DEV_NORMAL; + } + + /* + * alloc and init our DMA areas + */ + 
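+	/*
+	 * Each ring below follows the same recipe: dma_alloc_coherent()
+	 * returns a CPU virtual address and fills in a bus address the
+	 * chip can use, and the region is zeroed before the hardware is
+	 * pointed at it.  A minimal sketch of that pattern (ring_sz is
+	 * an illustrative name, not a driver symbol):
+	 *
+	 *	void *ring;
+	 *	dma_addr_t ring_dma;
+	 *
+	 *	ring = dma_alloc_coherent(mvi->dev, ring_sz, &ring_dma,
+	 *				  GFP_KERNEL);
+	 *	if (!ring)
+	 *		goto err_out;
+	 *	memset(ring, 0, ring_sz);
+	 */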
mvi->tx = dma_alloc_coherent(mvi->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + &mvi->tx_dma, GFP_KERNEL); + if (!mvi->tx) + goto err_out; + memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); + mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ, + &mvi->rx_fis_dma, GFP_KERNEL); + if (!mvi->rx_fis) + goto err_out; + memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); + + mvi->rx = dma_alloc_coherent(mvi->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + &mvi->rx_dma, GFP_KERNEL); + if (!mvi->rx) + goto err_out; + memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); + mvi->rx[0] = cpu_to_le32(0xfff); + mvi->rx_cons = 0xfff; + + mvi->slot = dma_alloc_coherent(mvi->dev, + sizeof(*mvi->slot) * slot_nr, + &mvi->slot_dma, GFP_KERNEL); + if (!mvi->slot) + goto err_out; + memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); + +#ifndef DISABLE_HOTPLUG_DMA_FIX + mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, + TRASH_BUCKET_SIZE, + &mvi->bulk_buffer_dma, GFP_KERNEL); + if (!mvi->bulk_buffer) + goto err_out; +#endif + for (i = 0; i < slot_nr; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; + + slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ, + &slot->buf_dma, GFP_KERNEL); + if (!slot->buf) { + printk(KERN_DEBUG"failed to allocate slot->buf.\n"); + goto err_out; + } + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + ++mvi->tags_num; + } + /* Initialize tags */ + mvs_tag_init(mvi); + return 0; +err_out: + return 1; +} + + +int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) +{ + unsigned long res_start, res_len, res_flag, res_flag_ex = 0; + struct pci_dev *pdev = mvi->pdev; + if (bar_ex != -1) { + /* + * ioremap main and peripheral registers + */ + res_start = pci_resource_start(pdev, bar_ex); + res_len = pci_resource_len(pdev, bar_ex); + if (!res_start || !res_len) + goto err_out; + + res_flag_ex = pci_resource_flags(pdev, bar_ex); + if (res_flag_ex & IORESOURCE_MEM) { + if (res_flag_ex & IORESOURCE_CACHEABLE) + mvi->regs_ex = ioremap(res_start, res_len); + else + mvi->regs_ex = ioremap_nocache(res_start, + res_len); + } else + mvi->regs_ex = (void *)res_start; + if (!mvi->regs_ex) + goto err_out; + } + + res_start = pci_resource_start(pdev, bar); + res_len = pci_resource_len(pdev, bar); + if (!res_start || !res_len) + goto err_out; + + res_flag = pci_resource_flags(pdev, bar); + if (res_flag & IORESOURCE_CACHEABLE) + mvi->regs = ioremap(res_start, res_len); + else + mvi->regs = ioremap_nocache(res_start, res_len); + + if (!mvi->regs) { + if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) + iounmap(mvi->regs_ex); + mvi->regs_ex = NULL; + goto err_out; + } + + return 0; +err_out: + return -1; +} + +void mvs_iounmap(void __iomem *regs) +{ + iounmap(regs); +} + +static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent, + struct Scsi_Host *shost, unsigned int id) +{ + struct mvs_info *mvi; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info), + GFP_KERNEL); + if (!mvi) + return NULL; + + mvi->pdev = pdev; + mvi->dev = &pdev->dev; + mvi->chip_id = ent->driver_data; + mvi->chip = &mvs_chips[mvi->chip_id]; + INIT_LIST_HEAD(&mvi->wq_list); + mvi->irq = pdev->irq; + + ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; + ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; + + mvi->id = id; + mvi->sas = sha; + mvi->shost = shost; +#ifdef MVS_USE_TASKLET + tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha); +#endif + + if 
(MVS_CHIP_DISP->chip_ioremap(mvi)) + goto err_out; + if (!mvs_alloc(mvi, shost)) + return mvi; +err_out: + mvs_free(mvi); + return NULL; +} + +/* move to PCI layer or libata core? */ +static int pci_go_64(struct pci_dev *pdev) +{ + int rc; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "64-bit DMA enable failed\n"); + return rc; + } + } + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit DMA enable failed\n"); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit consistent DMA enable failed\n"); + return rc; + } + } + + return rc; +} + +static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost, + const struct mvs_chip_info *chip_info) +{ + int phy_nr, port_nr; unsigned short core_nr; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + core_nr = chip_info->n_host; + phy_nr = core_nr * chip_info->n_phy; + port_nr = phy_nr; + + memset(sha, 0x00, sizeof(struct sas_ha_struct)); + arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); + arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) + goto exit_free; + + sha->sas_phy = arr_phy; + sha->sas_port = arr_port; + + sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL); + if (!sha->lldd_ha) + goto exit_free; + + ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; + + shost->transportt = mvs_stt; + shost->max_id = 128; + shost->max_lun = ~0; + shost->max_channel = 1; + shost->max_cmd_len = 16; + + return 0; +exit_free: + kfree(arr_phy); + kfree(arr_port); + return -1; + +} + +static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost, + const struct mvs_chip_info *chip_info) +{ + int can_queue, i = 0, j = 0; + struct mvs_info *mvi = NULL; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + + for (j = 0; j < nr_core; j++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; + for (i = 0; i < chip_info->n_phy; i++) { + sha->sas_phy[j * chip_info->n_phy + i] = + &mvi->phy[i].sas_phy; + sha->sas_port[j * chip_info->n_phy + i] = + &mvi->port[i].sas_port; + } + } + + sha->sas_ha_name = DRV_NAME; + sha->dev = mvi->dev; + sha->lldd_module = THIS_MODULE; + sha->sas_addr = &mvi->sas_addr[0]; + + sha->num_phys = nr_core * chip_info->n_phy; + + sha->lldd_max_execute_num = 1; + + if (mvi->flags & MVF_FLAG_SOC) + can_queue = MVS_SOC_CAN_QUEUE; + else + can_queue = MVS_CAN_QUEUE; + + sha->lldd_queue_size = can_queue; + shost->can_queue = can_queue; + mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys; + sha->core.shost = mvi->shost; +} + +static void mvs_init_sas_add(struct mvs_info *mvi) +{ + u8 i; + for (i = 0; i < mvi->chip->n_phy; i++) { + mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL; + mvi->phy[i].dev_sas_addr = + cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr)); + } + + memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE); +} + +static int __devinit mvs_pci_init(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + unsigned int rc, nhost = 0; + struct mvs_info *mvi; + irq_handler_t irq_handler = mvs_interrupt; + struct Scsi_Host *shost = NULL; + const struct mvs_chip_info *chip; + + 
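+	/*
+	 * pci_go_64() above asks for a 64-bit streaming mask first and
+	 * falls back to 32-bit when either the streaming or the
+	 * consistent mask is refused.  On later kernels the same
+	 * negotiation collapses into a single helper (sketch only, not
+	 * part of this driver):
+	 *
+	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+	 *		rc = dma_set_mask_and_coherent(&pdev->dev,
+	 *					       DMA_BIT_MASK(32));
+	 */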
dev_printk(KERN_INFO, &pdev->dev, + "mvsas: driver version %s\n", DRV_VERSION); + rc = pci_enable_device(pdev); + if (rc) + goto err_out_enable; + + pci_set_master(pdev); + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_disable; + + rc = pci_go_64(pdev); + if (rc) + goto err_out_regions; + + shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); + if (!shost) { + rc = -ENOMEM; + goto err_out_regions; + } + + chip = &mvs_chips[ent->driver_data]; + SHOST_TO_SAS_HA(shost) = + kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); + if (!SHOST_TO_SAS_HA(shost)) { + kfree(shost); + rc = -ENOMEM; + goto err_out_regions; + } + + rc = mvs_prep_sas_ha_init(shost, chip); + if (rc) { + kfree(shost); + rc = -ENOMEM; + goto err_out_regions; + } + + pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); + + do { + mvi = mvs_pci_alloc(pdev, ent, shost, nhost); + if (!mvi) { + rc = -ENOMEM; + goto err_out_regions; + } + + mvs_init_sas_add(mvi); + + mvi->instance = nhost; + rc = MVS_CHIP_DISP->chip_init(mvi); + if (rc) { + mvs_free(mvi); + goto err_out_regions; + } + nhost++; + } while (nhost < chip->n_host); + + mvs_post_sas_ha_init(shost, chip); + + rc = scsi_add_host(shost, &pdev->dev); + if (rc) + goto err_out_shost; + + rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); + if (rc) + goto err_out_shost; + rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, + DRV_NAME, SHOST_TO_SAS_HA(shost)); + if (rc) + goto err_not_sas; + + MVS_CHIP_DISP->interrupt_enable(mvi); + + scsi_scan_host(mvi->shost); + + return 0; + +err_not_sas: + sas_unregister_ha(SHOST_TO_SAS_HA(shost)); +err_out_shost: + scsi_remove_host(mvi->shost); +err_out_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); +err_out_enable: + return rc; +} + +static void __devexit mvs_pci_remove(struct pci_dev *pdev) +{ + unsigned short core_nr, i = 0; + struct sas_ha_struct *sha = pci_get_drvdata(pdev); + struct mvs_info *mvi = NULL; + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; + +#ifdef MVS_USE_TASKLET + tasklet_kill(&mv_tasklet); +#endif + + pci_set_drvdata(pdev, NULL); + sas_unregister_ha(sha); + sas_remove_host(mvi->shost); + scsi_remove_host(mvi->shost); + + MVS_CHIP_DISP->interrupt_disable(mvi); + free_irq(mvi->irq, sha); + for (i = 0; i < core_nr; i++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; + mvs_free(mvi); + } + kfree(sha->sas_phy); + kfree(sha->sas_port); + kfree(sha); + pci_release_regions(pdev); + pci_disable_device(pdev); + return; +} + +static struct pci_device_id __devinitdata mvs_pci_table[] = { + { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, + { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, + { + .vendor = PCI_VENDOR_ID_MARVELL, + .device = 0x6440, + .subvendor = PCI_ANY_ID, + .subdevice = 0x6480, + .class = 0, + .class_mask = 0, + .driver_data = chip_6485, + }, + { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, + { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 }, + { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 }, + { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, + + { } /* terminate list */ +}; + +static struct pci_driver mvs_pci_driver = { + .name = DRV_NAME, + .id_table = mvs_pci_table, + .probe = mvs_pci_init, + .remove = __devexit_p(mvs_pci_remove), +}; + +/* task handler */ +struct task_struct *mvs_th; +static int __init mvs_init(void) +{ + int rc; + mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); + if (!mvs_stt) + return -ENOMEM; + + rc = pci_register_driver(&mvs_pci_driver); + + if (rc) + goto err_out; + + return 
0; + +err_out: + sas_release_transport(mvs_stt); + return rc; +} + +static void __exit mvs_exit(void) +{ + pci_unregister_driver(&mvs_pci_driver); + sas_release_transport(mvs_stt); +} + +module_init(mvs_init); +module_exit(mvs_exit); + +MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); +MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +#ifdef CONFIG_PCI +MODULE_DEVICE_TABLE(pci, mvs_pci_table); +#endif diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c new file mode 100644 index 000000000000..0d2138641214 --- /dev/null +++ b/drivers/scsi/mvsas/mv_sas.c @@ -0,0 +1,2154 @@ +/* + * Marvell 88SE64xx/88SE94xx main function + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. <kewei@marvell.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + +#include "mv_sas.h" + +static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) +{ + if (task->lldd_task) { + struct mvs_slot_info *slot; + slot = task->lldd_task; + *tag = slot->slot_tag; + return 1; + } + return 0; +} + +void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +{ + void *bitmap = &mvi->tags; + clear_bit(tag, bitmap); +} + +void mvs_tag_free(struct mvs_info *mvi, u32 tag) +{ + mvs_tag_clear(mvi, tag); +} + +void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +{ + void *bitmap = &mvi->tags; + set_bit(tag, bitmap); +} + +inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +{ + unsigned int index, tag; + void *bitmap = &mvi->tags; + + index = find_first_zero_bit(bitmap, mvi->tags_num); + tag = index; + if (tag >= mvi->tags_num) + return -SAS_QUEUE_FULL; + mvs_tag_set(mvi, tag); + *tag_out = tag; + return 0; +} + +void mvs_tag_init(struct mvs_info *mvi) +{ + int i; + for (i = 0; i < mvi->tags_num; ++i) + mvs_tag_clear(mvi, i); +} + +void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) +{ + u32 i; + u32 run; + u32 offset; + + offset = 0; + while (size) { + printk(KERN_DEBUG"%08X : ", baseaddr + offset); + if (size >= 16) + run = 16; + else + run = size; + size -= run; + for (i = 0; i < 16; i++) { + if (i < run) + printk(KERN_DEBUG"%02X ", (u32)data[i]); + else + printk(KERN_DEBUG" "); + } + printk(KERN_DEBUG": "); + for (i = 0; i < run; i++) + printk(KERN_DEBUG"%c", + isalnum(data[i]) ? 
data[i] : '.'); + printk(KERN_DEBUG"\n"); + data = &data[16]; + offset += run; + } + printk(KERN_DEBUG"\n"); +} + +#if (_MV_DUMP > 1) +static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, + enum sas_protocol proto) +{ + u32 offset; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + + offset = slot->cmd_size + MVS_OAF_SZ + + MVS_CHIP_DISP->prd_size() * slot->n_elem; + dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n", + tag); + mvs_hexdump(32, (u8 *) slot->response, + (u32) slot->buf_dma + offset); +} +#endif + +static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, + enum sas_protocol proto) +{ +#if (_MV_DUMP > 1) + u32 sz, w_ptr; + u64 addr; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + + /*Delivery Queue */ + sz = MVS_CHIP_SLOT_SZ; + w_ptr = slot->tx; + addr = mvi->tx_dma; + dev_printk(KERN_DEBUG, mvi->dev, + "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); + dev_printk(KERN_DEBUG, mvi->dev, + "Delivery Queue Base Address=0x%llX (PA)" + "(tx_dma=0x%llX), Entry=%04d\n", + addr, (unsigned long long)mvi->tx_dma, w_ptr); + mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), + (u32) mvi->tx_dma + sizeof(u32) * w_ptr); + /*Command List */ + addr = mvi->slot_dma; + dev_printk(KERN_DEBUG, mvi->dev, + "Command List Base Address=0x%llX (PA)" + "(slot_dma=0x%llX), Header=%03d\n", + addr, (unsigned long long)slot->buf_dma, tag); + dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag); + /*mvs_cmd_hdr */ + mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), + (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); + /*1.command table area */ + dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n"); + mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); + /*2.open address frame area */ + dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n"); + mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, + (u32) slot->buf_dma + slot->cmd_size); + /*3.status buffer */ + mvs_hba_sb_dump(mvi, tag, proto); + /*4.PRD table */ + dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n"); + mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem, + (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, + (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); +#endif +} + +static void mvs_hba_cq_dump(struct mvs_info *mvi) +{ +#if (_MV_DUMP > 2) + u64 addr; + void __iomem *regs = mvi->regs; + u32 entry = mvi->rx_cons + 1; + u32 rx_desc = le32_to_cpu(mvi->rx[entry]); + + /*Completion Queue */ + addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); + dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n", + mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); + dev_printk(KERN_DEBUG, mvi->dev, + "Completion List Base Address=0x%llX (PA), " + "CQ_Entry=%04d, CQ_WP=0x%08X\n", + addr, entry - 1, mvi->rx[0]); + mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), + mvi->rx_dma + sizeof(u32) * entry); +#endif +} + +void mvs_get_sas_addr(void *buf, u32 buflen) +{ + /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/ +} + +struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) +{ + unsigned long i = 0, j = 0, hi = 0; + struct sas_ha_struct *sha = dev->port->ha; + struct mvs_info *mvi = NULL; + struct asd_sas_phy *phy; + + while (sha->sas_port[i]) { + if (sha->sas_port[i] == dev->port) { + phy = container_of(sha->sas_port[i]->phy_list.next, + struct asd_sas_phy, port_phy_el); + j = 0; + while (sha->sas_phy[j]) { + if (sha->sas_phy[j] == phy) + break; + j++; + } + break; + } + i++; + } + hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; + 
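+	/*
+	 * The global libsas phy index is folded back into a per-core
+	 * index here.  Taking chip_9480 from the chip table above
+	 * (n_host = 2, n_phy = 4): phy 6 gives hi = 6 / 4 = 1, so the
+	 * second core's mvs_info is returned below.
+	 */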
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; + + return mvi; + +} + +/* FIXME */ +int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) +{ + unsigned long i = 0, j = 0, n = 0, num = 0; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; + struct sas_ha_struct *sha = dev->port->ha; + + while (sha->sas_port[i]) { + if (sha->sas_port[i] == dev->port) { + struct asd_sas_phy *phy; + list_for_each_entry(phy, + &sha->sas_port[i]->phy_list, port_phy_el) { + j = 0; + while (sha->sas_phy[j]) { + if (sha->sas_phy[j] == phy) + break; + j++; + } + phyno[n] = (j >= mvi->chip->n_phy) ? + (j - mvi->chip->n_phy) : j; + num++; + n++; + } + break; + } + i++; + } + return num; +} + +static inline void mvs_free_reg_set(struct mvs_info *mvi, + struct mvs_device *dev) +{ + if (!dev) { + mv_printk("device has been free.\n"); + return; + } + if (dev->runing_req != 0) + return; + if (dev->taskfileset == MVS_ID_NOT_MAPPED) + return; + MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); +} + +static inline u8 mvs_assign_reg_set(struct mvs_info *mvi, + struct mvs_device *dev) +{ + if (dev->taskfileset != MVS_ID_NOT_MAPPED) + return 0; + return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); +} + +void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) +{ + u32 no; + for_each_phy(phy_mask, phy_mask, no) { + if (!(phy_mask & 1)) + continue; + MVS_CHIP_DISP->phy_reset(mvi, no, hard); + } +} + +/* FIXME: locking? */ +int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + int rc = 0, phy_id = sas_phy->id; + u32 tmp, i = 0, hi; + struct sas_ha_struct *sha = sas_phy->ha; + struct mvs_info *mvi = NULL; + + while (sha->sas_phy[i]) { + if (sha->sas_phy[i] == sas_phy) + break; + i++; + } + hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; + + switch (func) { + case PHY_FUNC_SET_LINK_RATE: + MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); + break; + + case PHY_FUNC_HARD_RESET: + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); + if (tmp & PHY_RST_HARD) + break; + MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); + break; + + case PHY_FUNC_LINK_RESET: + MVS_CHIP_DISP->phy_enable(mvi, phy_id); + MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); + break; + + case PHY_FUNC_DISABLE: + MVS_CHIP_DISP->phy_disable(mvi, phy_id); + break; + case PHY_FUNC_RELEASE_SPINUP_HOLD: + default: + rc = -EOPNOTSUPP; + } + msleep(200); + return rc; +} + +void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, + u32 off_lo, u32 off_hi, u64 sas_addr) +{ + u32 lo = (u32)sas_addr; + u32 hi = (u32)(sas_addr>>32); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); + MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); + MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); +} + +static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct sas_ha_struct *sas_ha; + if (!phy->phy_attached) + return; + + if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) + && phy->phy_type & PORT_TYPE_SAS) { + return; + } + + sas_ha = mvi->sas; + sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); + + if (sas_phy->phy) { + struct sas_phy *sphy = sas_phy->phy; + + sphy->negotiated_linkrate = sas_phy->linkrate; + sphy->minimum_linkrate = phy->minimum_linkrate; + sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + 
sphy->maximum_linkrate = phy->maximum_linkrate; + sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); + } + + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; + + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = phy->identify.target_port_protocols; + } else if (phy->phy_type & PORT_TYPE_SATA) { + /*Nothing*/ + } + mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); + + sas_phy->frame_rcvd_size = phy->frame_rcvd_size; + + mvi->sas->notify_port_event(sas_phy, + PORTE_BYTES_DMAED); +} + +int mvs_slave_alloc(struct scsi_device *scsi_dev) +{ + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + if (dev_is_sata(dev)) { + /* We don't need to rescan targets + * if REPORT_LUNS request is failed + */ + if (scsi_dev->lun > 0) + return -ENXIO; + scsi_dev->tagged_supported = 1; + } + + return sas_slave_alloc(scsi_dev); +} + +int mvs_slave_configure(struct scsi_device *sdev) +{ + struct domain_device *dev = sdev_to_domain_dev(sdev); + int ret = sas_slave_configure(sdev); + + if (ret) + return ret; + if (dev_is_sata(dev)) { + /* may set PIO mode */ + #if MV_DISABLE_NCQ + struct ata_port *ap = dev->sata_dev.ap; + struct ata_device *adev = ap->link.device; + adev->flags |= ATA_DFLAG_NCQ_OFF; + scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); + #endif + } + return 0; +} + +void mvs_scan_start(struct Scsi_Host *shost) +{ + int i, j; + unsigned short core_nr; + struct mvs_info *mvi; + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + + core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; + + for (j = 0; j < core_nr; j++) { + mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; + for (i = 0; i < mvi->chip->n_phy; ++i) + mvs_bytes_dmaed(mvi, i); + } +} + +int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + scsi_flush_work(shost); + return 1; +} + +static int mvs_task_prep_smp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + int elem, rc, i; + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct domain_device *dev = task->dev; + struct asd_sas_port *sas_port = dev->port; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len, tag = tei->tag; + void *buf_tmp; + u8 *buf_oaf; + dma_addr_t buf_tmp_dma; + void *buf_prd; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#if _MV_DUMP + u8 *buf_cmd; + void *from; +#endif + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = SB_RFB_MAX; + + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ + buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + +#if _MV_DUMP + buf_cmd = buf_tmp; + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + buf_tmp += req_len; + buf_tmp_dma += req_len; + slot->cmd_size = 
req_len; +#else + hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); +#endif + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table *********************************** */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; + + /* + * Fill in TX ring and command slot header + */ + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | + TXQ_MODE_I | tag | + (sas_port->phy_mask << TXQ_PHY_SHIFT)); + + hdr->flags |= flags; + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); + hdr->tags = cpu_to_le32(tag); + hdr->data_len = 0; + + /* generate open address frame hdr (first 12 bytes) */ + /* initiator, SMP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; + buf_oaf[1] = dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); + +#if _MV_DUMP + /* copy cmd table */ + from = kmap_atomic(sg_page(sg_req), KM_IRQ0); + memcpy(buf_cmd, from + sg_req->offset, req_len); + kunmap_atomic(from, KM_IRQ0); +#endif + return 0; + +err_out_2: + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = task->uldd_task; + + if (qc) { + if (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ) { + *tag = qc->tag; + return 1; + } + } + + return 0; +} + +static int mvs_task_prep_ata(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + struct sas_task *task = tei->task; + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = dev->lldd_dev; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct asd_sas_port *sas_port = dev->port; + struct mvs_slot_info *slot; + void *buf_prd; + u32 tag = tei->tag, hdr_tag; + u32 flags, del_q; + void *buf_tmp; + u8 *buf_cmd, *buf_oaf; + dma_addr_t buf_tmp_dma; + u32 i, req_len, resp_len; + const u32 max_resp_len = SB_RFB_MAX; + + if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { + mv_dprintk("Have not enough regiset for dev %d.\n", + mvi_dev->device_id); + return -EBUSY; + } + slot = &mvi->slot_info[tag]; + slot->tx = mvi->tx_prod; + del_q = TXQ_MODE_I | tag | + (TXQ_CMD_STP << TXQ_CMD_SHIFT) | + (sas_port->phy_mask << TXQ_PHY_SHIFT) | + (mvi_dev->taskfileset << TXQ_SRS_SHIFT); + mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); + +#ifndef DISABLE_HOTPLUG_DMA_FIX + if (task->data_dir == DMA_FROM_DEVICE) + flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); + else + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#else + flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#endif + if (task->ata_task.use_ncq) + flags |= MCH_FPDMA; + if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { + if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) + flags |= MCH_ATAPI; 
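+	/*
+	 * For NCQ the queue tag travels in bits 7:3 of the FIS sector
+	 * count field, which is why the code below ORs the hardware tag
+	 * in shifted left by 3, e.g. tag 5 becomes 5 << 3 = 0x28.
+	 */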
+ } + + /* FIXME: fill in port multiplier number */ + + hdr->flags = cpu_to_le32(flags); + + /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ + if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + else + hdr_tag = tag; + + hdr->tags = cpu_to_le32(hdr_tag); + + hdr->data_len = cpu_to_le32(task->total_xfer_len); + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ + buf_cmd = buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_ATA_CMD_SZ; + buf_tmp_dma += MVS_ATA_CMD_SZ; +#if _MV_DUMP + slot->cmd_size = MVS_ATA_CMD_SZ; +#endif + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + /* used for STP. unused for SATA? */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); + + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + /* FIXME: probably unused, for SATA. kept here just in case + * we get a STP/SATA error information record + */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; + + req_len = sizeof(struct host_to_dev_fis); + resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - + sizeof(struct mvs_err_info) - i; + + /* request, response lengths */ + resp_len = min(resp_len, max_resp_len); + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); + + if (likely(!task->ata_task.device_control_reg_update)) + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + /* fill in command FIS and ATAPI CDB */ + memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) + memcpy(buf_cmd + STP_ATAPI_CMD, + task->ata_task.atapi_packet, 16); + + /* generate open address frame hdr (first 12 bytes) */ + /* initiator, STP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; + buf_oaf[1] = dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); +#ifndef DISABLE_HOTPLUG_DMA_FIX + if (task->data_dir == DMA_FROM_DEVICE) + MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, + TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); +#endif + return 0; +} + +static int mvs_task_prep_ssp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei, int is_tmf, + struct mvs_tmf_task *tmf) +{ + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct mvs_port *port = tei->port; + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = dev->lldd_dev; + struct asd_sas_port *sas_port = dev->port; + struct mvs_slot_info *slot; + void *buf_prd; + struct ssp_frame_hdr *ssp_hdr; + void *buf_tmp; + u8 *buf_cmd, *buf_oaf, fburst = 0; + dma_addr_t buf_tmp_dma; + u32 flags; + u32 resp_len, req_len, i, tag = tei->tag; + const u32 max_resp_len = SB_RFB_MAX; + u32 phy_mask; + + slot = &mvi->slot_info[tag]; + + 
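+	/*
+	 * A wide port drives the frame out over all member phys: with
+	 * phys 0-3 formed into one wide port, wide_port_phymap is 0xf
+	 * and the delivery queue entry built below carries that mask in
+	 * its TXQ_PHY field; a narrow port falls back to the libsas
+	 * sas_port->phy_mask.
+	 */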
phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap : + sas_port->phy_mask) & TXQ_PHY_MASK; + + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | + (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | + (phy_mask << TXQ_PHY_SHIFT)); + + flags = MCH_RETRY; + if (task->ssp_task.enable_first_burst) { + flags |= MCH_FBURST; + fburst = (1 << 7); + } + if (is_tmf) + flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); + else + flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT); + hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); + hdr->tags = cpu_to_le32(tag); + hdr->data_len = cpu_to_le32(task->total_xfer_len); + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + buf_cmd = buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_SSP_CMD_SZ; + buf_tmp_dma += MVS_SSP_CMD_SZ; +#if _MV_DUMP + slot->cmd_size = MVS_SSP_CMD_SZ; +#endif + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = MVS_CHIP_DISP->prd_size() * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + if (mvi->flags & MVF_FLAG_SOC) + hdr->reserved[0] = 0; + + resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - + sizeof(struct mvs_err_info) - i; + resp_len = min(resp_len, max_resp_len); + + req_len = sizeof(struct ssp_frame_hdr) + 28; + + /* request, response lengths */ + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); + + /* generate open address frame hdr (first 12 bytes) */ + /* initiator, SSP, ftype 1h */ + buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; + buf_oaf[1] = dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); + memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in SSP frame header (Command Table.SSP frame header) */ + ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; + + if (is_tmf) + ssp_hdr->frame_type = SSP_TASK; + else + ssp_hdr->frame_type = SSP_COMMAND; + + memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + memcpy(ssp_hdr->hashed_src_addr, + dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + ssp_hdr->tag = cpu_to_be16(tag); + + /* fill in IU for TASK and Command Frame */ + buf_cmd += sizeof(*ssp_hdr); + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + + if (ssp_hdr->frame_type != SSP_TASK) { + buf_cmd[9] = fburst | task->ssp_task.task_attr | + (task->ssp_task.task_prio << 3); + memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); + } else{ + buf_cmd[10] = tmf->tmf; + switch (tmf->tmf) { + case TMF_ABORT_TASK: + case TMF_QUERY_TASK: + buf_cmd[12] = + (tmf->tag_of_task_to_be_managed >> 8) & 0xff; + buf_cmd[13] = + tmf->tag_of_task_to_be_managed & 0xff; + break; + default: + break; + } + } + /* fill in PRD (scatter/gather) table, if any */ + MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); + return 0; +} + +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) +static int mvs_task_exec(struct sas_task *task, const int num, 
gfp_t gfp_flags, + struct completion *completion, int is_tmf, + struct mvs_tmf_task *tmf) +{ + struct domain_device *dev = task->dev; + struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; + struct mvs_task_exec_info tei; + struct sas_task *t = task; + struct mvs_slot_info *slot; + u32 tag = 0xdeadbeef, rc, n_elem = 0; + u32 n = num, pass = 0; + unsigned long flags = 0; + + if (!dev->port) { + struct task_status_struct *tsm = &t->task_status; + + tsm->resp = SAS_TASK_UNDELIVERED; + tsm->stat = SAS_PHY_DOWN; + t->task_done(t); + return 0; + } + + spin_lock_irqsave(&mvi->lock, flags); + do { + dev = t->dev; + mvi_dev = dev->lldd_dev; + if (DEV_IS_GONE(mvi_dev)) { + if (mvi_dev) + mv_dprintk("device %d not ready.\n", + mvi_dev->device_id); + else + mv_dprintk("device %016llx not ready.\n", + SAS_ADDR(dev->sas_addr)); + + rc = SAS_PHY_DOWN; + goto out_done; + } + + if (dev->port->id >= mvi->chip->n_phy) + tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy]; + else + tei.port = &mvi->port[dev->port->id]; + + if (!tei.port->port_attached) { + if (sas_protocol_ata(t->task_proto)) { + mv_dprintk("port %d has no " + "attached device.\n", dev->port->id); + rc = SAS_PHY_DOWN; + goto out_done; + } else { + struct task_status_struct *ts = &t->task_status; + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + t->task_done(t); + if (n > 1) + t = list_entry(t->list.next, + struct sas_task, list); + continue; + } + } + + if (!sas_protocol_ata(t->task_proto)) { + if (t->num_scatter) { + n_elem = dma_map_sg(mvi->dev, + t->scatter, + t->num_scatter, + t->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto err_out; + } + } + } else { + n_elem = t->num_scatter; + } + + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + + slot = &mvi->slot_info[tag]; + + + t->lldd_task = NULL; + slot->n_elem = n_elem; + slot->slot_tag = tag; + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + + tei.task = t; + tei.hdr = &mvi->slot[tag]; + tei.tag = tag; + tei.n_elem = n_elem; + switch (t->task_proto) { + case SAS_PROTOCOL_SMP: + rc = mvs_task_prep_smp(mvi, &tei); + break; + case SAS_PROTOCOL_SSP: + rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + rc = mvs_task_prep_ata(mvi, &tei); + break; + default: + dev_printk(KERN_ERR, mvi->dev, + "unknown sas_task proto: 0x%x\n", + t->task_proto); + rc = -EINVAL; + break; + } + + if (rc) { + mv_dprintk("rc is %x\n", rc); + goto err_out_tag; + } + slot->task = t; + slot->port = tei.port; + t->lldd_task = slot; + list_add_tail(&slot->entry, &tei.port->list); + /* TODO: select normal or high priority */ + spin_lock(&t->task_state_lock); + t->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock(&t->task_state_lock); + + mvs_hba_memory_dump(mvi, tag, t->task_proto); + mvi_dev->runing_req++; + ++pass; + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); + if (n > 1) + t = list_entry(t->list.next, struct sas_task, list); + } while (--n); + rc = 0; + goto out_done; + +err_out_tag: + mvs_tag_free(mvi, tag); +err_out: + + dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); + if (!sas_protocol_ata(t->task_proto)) + if (n_elem) + dma_unmap_sg(mvi->dev, t->scatter, n_elem, + t->data_dir); +out_done: + if (likely(pass)) { + MVS_CHIP_DISP->start_delivery(mvi, + (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); + } + spin_unlock_irqrestore(&mvi->lock, flags); + return rc; +} + +int mvs_queue_command(struct sas_task
*task, const int num, + gfp_t gfp_flags) +{ + return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); +} + +static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) +{ + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + mvs_tag_clear(mvi, slot_idx); +} + +static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, + struct mvs_slot_info *slot, u32 slot_idx) +{ + if (!slot->task) + return; + if (!sas_protocol_ata(task->task_proto)) + if (slot->n_elem) + dma_unmap_sg(mvi->dev, task->scatter, + slot->n_elem, task->data_dir); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); + dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SSP: + default: + /* do nothing */ + break; + } + list_del_init(&slot->entry); + task->lldd_task = NULL; + slot->task = NULL; + slot->port = NULL; + slot->slot_tag = 0xFFFFFFFF; + mvs_slot_free(mvi, slot_idx); +} + +static void mvs_update_wideport(struct mvs_info *mvi, int i) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct mvs_port *port = phy->port; + int j, no; + + for_each_phy(port->wide_port_phymap, j, no) { + if (j & 1) { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, + PHYR_WIDE_PORT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, + port->wide_port_phymap); + } else { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, + PHYR_WIDE_PORT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, no, + 0); + } + } +} + +static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) +{ + u32 tmp; + struct mvs_phy *phy = &mvi->phy[i]; + struct mvs_port *port = phy->port; + + tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); + if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { + if (!port) + phy->phy_attached = 1; + return tmp; + } + + if (port) { + if (phy->phy_type & PORT_TYPE_SAS) { + port->wide_port_phymap &= ~(1U << i); + if (!port->wide_port_phymap) + port->port_attached = 0; + mvs_update_wideport(mvi, i); + } else if (phy->phy_type & PORT_TYPE_SATA) + port->port_attached = 0; + phy->port = NULL; + phy->phy_attached = 0; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + } + return 0; +} + +static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) +{ + u32 *s = (u32 *) buf; + + if (!s) + return NULL; + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); + s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); + s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); + s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); + s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); + + /* Workaround: take some ATAPI devices for ATA */ + if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) + s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); + + return s; +} + +static u32 mvs_is_sig_fis_received(u32 irq_status) +{ + return irq_status & PHYEV_SIG_FIS; +} + +void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct sas_identify_frame *id; + + id = (struct sas_identify_frame *)phy->frame_rcvd; + + if (get_st) { + phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); + phy->phy_status = mvs_is_phy_ready(mvi, i); + } + + if (phy->phy_status) { + int oob_done = 0; + struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; + + 
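+	/*
+	 * For SATA phys, attachment is only claimed once the device's
+	 * initial D2H signature FIS has arrived (PHYEV_SIG_FIS); the
+	 * signature words are then read back through mvs_get_d2h_reg()
+	 * above.  Otherwise the phy is left detached and the
+	 * signature-FIS interrupt is unmasked, as the "No sig fis"
+	 * branch below shows.
+	 */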
oob_done = MVS_CHIP_DISP->oob_done(mvi, i); + + MVS_CHIP_DISP->fix_phy_info(mvi, i, id); + if (phy->phy_type & PORT_TYPE_SATA) { + phy->identify.target_port_protocols = SAS_PROTOCOL_STP; + if (mvs_is_sig_fis_received(phy->irq_status)) { + phy->phy_attached = 1; + phy->att_dev_sas_addr = + i + mvi->id * mvi->chip->n_phy; + if (oob_done) + sas_phy->oob_mode = SATA_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct dev_to_host_fis); + mvs_get_d2h_reg(mvi, i, id); + } else { + u32 tmp; + dev_printk(KERN_DEBUG, mvi->dev, + "Phy%d : No sig fis\n", i); + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); + MVS_CHIP_DISP->write_port_irq_mask(mvi, i, + tmp | PHYEV_SIG_FIS); + phy->phy_attached = 0; + phy->phy_type &= ~PORT_TYPE_SATA; + MVS_CHIP_DISP->phy_reset(mvi, i, 0); + goto out_done; + } + } else if (phy->phy_type & PORT_TYPE_SAS + || phy->att_dev_info & PORT_SSP_INIT_MASK) { + phy->phy_attached = 1; + phy->identify.device_type = + phy->att_dev_info & PORT_DEV_TYPE_MASK; + + if (phy->identify.device_type == SAS_END_DEV) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != NO_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + if (oob_done) + sas_phy->oob_mode = SAS_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct sas_identify_frame); + } + memcpy(sas_phy->attached_sas_addr, + &phy->att_dev_sas_addr, SAS_ADDR_SIZE); + + if (MVS_CHIP_DISP->phy_work_around) + MVS_CHIP_DISP->phy_work_around(mvi, i); + } + mv_dprintk("port %d attach dev info is %x\n", + i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); + mv_dprintk("port %d attach sas addr is %llx\n", + i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); +out_done: + if (get_st) + MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); +} + +static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) +{ + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct mvs_info *mvi = NULL; int i = 0, hi; + struct mvs_phy *phy = sas_phy->lldd_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_port *port; + unsigned long flags = 0; + if (!sas_port) + return; + + while (sas_ha->sas_phy[i]) { + if (sas_ha->sas_phy[i] == sas_phy) + break; + i++; + } + hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; + mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; + if (sas_port->id >= mvi->chip->n_phy) + port = &mvi->port[sas_port->id - mvi->chip->n_phy]; + else + port = &mvi->port[sas_port->id]; + if (lock) + spin_lock_irqsave(&mvi->lock, flags); + port->port_attached = 1; + phy->port = port; + if (phy->phy_type & PORT_TYPE_SAS) { + port->wide_port_phymap = sas_port->phy_mask; + mv_printk("set wide port phy map %x\n", sas_port->phy_mask); + mvs_update_wideport(mvi, sas_phy->id); + } + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); +} + +static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) +{ + /*Nothing*/ +} + + +void mvs_port_formed(struct asd_sas_phy *sas_phy) +{ + mvs_port_notify_formed(sas_phy, 1); +} + +void mvs_port_deformed(struct asd_sas_phy *sas_phy) +{ + mvs_port_notify_deformed(sas_phy, 1); +} + +struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) +{ + u32 dev; + for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { + if (mvi->devices[dev].dev_type == NO_DEVICE) { + mvi->devices[dev].device_id = dev; + return &mvi->devices[dev]; + } + } + + if (dev == MVS_MAX_DEVICES) + mv_printk("max support %d devices, ignore ..\n", + MVS_MAX_DEVICES); + + return NULL; +} + +void mvs_free_dev(struct mvs_device *mvi_dev) +{ + u32 id = 
mvi_dev->device_id; + memset(mvi_dev, 0, sizeof(*mvi_dev)); + mvi_dev->device_id = id; + mvi_dev->dev_type = NO_DEVICE; + mvi_dev->dev_status = MVS_DEV_NORMAL; + mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; +} + +int mvs_dev_found_notify(struct domain_device *dev, int lock) +{ + unsigned long flags = 0; + int res = 0; + struct mvs_info *mvi = NULL; + struct domain_device *parent_dev = dev->parent; + struct mvs_device *mvi_device; + + mvi = mvs_find_dev_mvi(dev); + + if (lock) + spin_lock_irqsave(&mvi->lock, flags); + + mvi_device = mvs_alloc_dev(mvi); + if (!mvi_device) { + res = -1; + goto found_out; + } + dev->lldd_dev = mvi_device; + mvi_device->dev_type = dev->dev_type; + mvi_device->mvi_info = mvi; + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { + int phy_id; + u8 phy_num = parent_dev->ex_dev.num_phys; + struct ex_phy *phy; + for (phy_id = 0; phy_id < phy_num; phy_id++) { + phy = &parent_dev->ex_dev.ex_phy[phy_id]; + if (SAS_ADDR(phy->attached_sas_addr) == + SAS_ADDR(dev->sas_addr)) { + mvi_device->attached_phy = phy_id; + break; + } + } + + if (phy_id == phy_num) { + mv_printk("Error: no attached dev:%016llx" + "at ex:%016llx.\n", + SAS_ADDR(dev->sas_addr), + SAS_ADDR(parent_dev->sas_addr)); + res = -1; + } + } + +found_out: + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); + return res; +} + +int mvs_dev_found(struct domain_device *dev) +{ + return mvs_dev_found_notify(dev, 1); +} + +void mvs_dev_gone_notify(struct domain_device *dev, int lock) +{ + unsigned long flags = 0; + struct mvs_device *mvi_dev = dev->lldd_dev; + struct mvs_info *mvi = mvi_dev->mvi_info; + + if (lock) + spin_lock_irqsave(&mvi->lock, flags); + + if (mvi_dev) { + mv_dprintk("found dev[%d:%x] is gone.\n", + mvi_dev->device_id, mvi_dev->dev_type); + mvs_free_reg_set(mvi, mvi_dev); + mvs_free_dev(mvi_dev); + } else { + mv_dprintk("found dev has gone.\n"); + } + dev->lldd_dev = NULL; + + if (lock) + spin_unlock_irqrestore(&mvi->lock, flags); +} + + +void mvs_dev_gone(struct domain_device *dev) +{ + mvs_dev_gone_notify(dev, 1); +} + +static struct sas_task *mvs_alloc_task(void) +{ + struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL); + + if (task) { + INIT_LIST_HEAD(&task->list); + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_timer(&task->timer); + init_completion(&task->completion); + } + return task; +} + +static void mvs_free_task(struct sas_task *task) +{ + if (task) { + BUG_ON(!list_empty(&task->list)); + kfree(task); + } +} + +static void mvs_task_done(struct sas_task *task) +{ + if (!del_timer(&task->timer)) + return; + complete(&task->completion); +} + +static void mvs_tmf_timedout(unsigned long data) +{ + struct sas_task *task = (struct sas_task *)data; + + task->task_state_flags |= SAS_TASK_STATE_ABORTED; + complete(&task->completion); +} + +/* XXX */ +#define MVS_TASK_TIMEOUT 20 +static int mvs_exec_internal_tmf_task(struct domain_device *dev, + void *parameter, u32 para_len, struct mvs_tmf_task *tmf) +{ + int res, retry; + struct sas_task *task = NULL; + + for (retry = 0; retry < 3; retry++) { + task = mvs_alloc_task(); + if (!task) + return -ENOMEM; + + task->dev = dev; + task->task_proto = dev->tproto; + + memcpy(&task->ssp_task, parameter, para_len); + task->task_done = mvs_task_done; + + task->timer.data = (unsigned long) task; + task->timer.function = mvs_tmf_timedout; + task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; + add_timer(&task->timer); + + res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); + + 
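+	/*
+	 * Completion and timeout deliberately race from here on:
+	 * mvs_task_done() only completes the task when it wins
+	 * del_timer(), while mvs_tmf_timedout() marks the task
+	 * SAS_TASK_STATE_ABORTED before completing it.  The wait below
+	 * therefore wakes exactly once, and the ABORTED-but-not-DONE
+	 * test that follows is what distinguishes a timed-out TMF from
+	 * a normal response.
+	 */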
if (res) { + del_timer(&task->timer); + mv_printk("executing internal task failed:%d\n", res); + goto ex_err; + } + + wait_for_completion(&task->completion); + res = -TMF_RESP_FUNC_FAILED; + /* Even if the TMF timed out, return directly. */ + if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { + mv_printk("TMF task[%x] timeout.\n", tmf->tmf); + goto ex_err; + } + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAM_GOOD) { + res = TMF_RESP_FUNC_COMPLETE; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_UNDERRUN) { + /* no error, but return the number of bytes of + * underrun */ + res = task->task_status.residual; + break; + } + + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == SAS_DATA_OVERRUN) { + mv_dprintk("blocked task error.\n"); + res = -EMSGSIZE; + break; + } else { + mv_dprintk(" task to dev %016llx response: 0x%x " + "status 0x%x\n", + SAS_ADDR(dev->sas_addr), + task->task_status.resp, + task->task_status.stat); + mvs_free_task(task); + task = NULL; + + } + } +ex_err: + BUG_ON(retry == 3 && task != NULL); + if (task != NULL) + mvs_free_task(task); + return res; +} + +static int mvs_debug_issue_ssp_tmf(struct domain_device *dev, + u8 *lun, struct mvs_tmf_task *tmf) +{ + struct sas_ssp_task ssp_task; + DECLARE_COMPLETION_ONSTACK(completion); + if (!(dev->tproto & SAS_PROTOCOL_SSP)) + return TMF_RESP_FUNC_ESUPP; + + strncpy((u8 *)&ssp_task.LUN, lun, 8); + + return mvs_exec_internal_tmf_task(dev, &ssp_task, + sizeof(ssp_task), tmf); +} + + +/* Standard mandates link reset for ATA (type 0) + and hard reset for SSP (type 1), only for RECOVERY */ +static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) +{ + int rc; + struct sas_phy *phy = sas_find_local_phy(dev); + int reset_type = (dev->dev_type == SATA_DEV || + (dev->tproto & SAS_PROTOCOL_STP)) ?
+	rc = sas_phy_reset(phy, reset_type);
+	msleep(2000);
+	return rc;
+}
+
+/* mandatory SAM-3 */
+int mvs_lu_reset(struct domain_device *dev, u8 *lun)
+{
+	unsigned long flags;
+	int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_tmf_task tmf_task;
+	struct mvs_device *mvi_dev = dev->lldd_dev;
+	struct mvs_info *mvi = mvi_dev->mvi_info;
+
+	tmf_task.tmf = TMF_LU_RESET;
+	mvi_dev->dev_status = MVS_DEV_EH;
+	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+	if (rc == TMF_RESP_FUNC_COMPLETE) {
+		num = mvs_find_dev_phyno(dev, phyno);
+		spin_lock_irqsave(&mvi->lock, flags);
+		for (i = 0; i < num; i++)
+			mvs_release_task(mvi, phyno[i], dev);
+		spin_unlock_irqrestore(&mvi->lock, flags);
+	}
+	/* If this fails, fall through to I_T nexus reset */
+	mv_printk("%s for device[%x]:rc= %d\n", __func__,
+			mvi_dev->device_id, rc);
+	return rc;
+}
+
+int mvs_I_T_nexus_reset(struct domain_device *dev)
+{
+	unsigned long flags;
+	int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+	struct mvs_info *mvi = mvi_dev->mvi_info;
+
+	if (mvi_dev->dev_status != MVS_DEV_EH)
+		return TMF_RESP_FUNC_COMPLETE;
+	rc = mvs_debug_I_T_nexus_reset(dev);
+	mv_printk("%s for device[%x]:rc= %d\n",
+		__func__, mvi_dev->device_id, rc);
+
+	/* housekeeping */
+	num = mvs_find_dev_phyno(dev, phyno);
+	spin_lock_irqsave(&mvi->lock, flags);
+	for (i = 0; i < num; i++)
+		mvs_release_task(mvi, phyno[i], dev);
+	spin_unlock_irqrestore(&mvi->lock, flags);
+
+	return rc;
+}
+/* optional SAM-3 */
+int mvs_query_task(struct sas_task *task)
+{
+	u32 tag;
+	struct scsi_lun lun;
+	struct mvs_tmf_task tmf_task;
+	int rc = TMF_RESP_FUNC_FAILED;
+
+	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
+		struct domain_device *dev = task->dev;
+		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+		struct mvs_info *mvi = mvi_dev->mvi_info;
+
+		int_to_scsilun(cmnd->device->lun, &lun);
+		rc = mvs_find_tag(mvi, task, &tag);
+		if (rc == 0) {
+			rc = TMF_RESP_FUNC_FAILED;
+			return rc;
+		}
+
+		tmf_task.tmf = TMF_QUERY_TASK;
+		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+		switch (rc) {
+		/* The task is still in the LUN; it will be released later */
+		case TMF_RESP_FUNC_SUCC:
+		/* The task is not in the LUN, or it failed; reset the phy */
+		case TMF_RESP_FUNC_FAILED:
+		case TMF_RESP_FUNC_COMPLETE:
+			break;
+		}
+	}
+	mv_printk("%s:rc= %d\n", __func__, rc);
+	return rc;
+}
+
+/* mandatory SAM-3; the task/slot info still needs to be freed */
+int mvs_abort_task(struct sas_task *task)
+{
+	struct scsi_lun lun;
+	struct mvs_tmf_task tmf_task;
+	struct domain_device *dev = task->dev;
+	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+	struct mvs_info *mvi = mvi_dev->mvi_info;
+	int rc = TMF_RESP_FUNC_FAILED;
+	unsigned long flags;
+	u32 tag;
+
+	if (mvi->exp_req)
+		mvi->exp_req--;
+	spin_lock_irqsave(&task->task_state_lock, flags);
+	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+		spin_unlock_irqrestore(&task->task_state_lock, flags);
+		rc = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	}
+	spin_unlock_irqrestore(&task->task_state_lock, flags);
+	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
+
+		int_to_scsilun(cmnd->device->lun, &lun);
+		rc = mvs_find_tag(mvi, task, &tag);
+		if (rc == 0) {
+			mv_printk("No such tag in %s\n", __func__);
+			rc = TMF_RESP_FUNC_FAILED;
+			return rc;
+		}
+
+		tmf_task.tmf = TMF_ABORT_TASK;
+		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+
+		/* if successful, complete the slot and forward the callback */
+		if (rc == TMF_RESP_FUNC_COMPLETE) {
+			u32 slot_no;
+			struct mvs_slot_info *slot;
+
+			if (task->lldd_task) {
+				slot = task->lldd_task;
+				slot_no = (u32) (slot - mvi->slot_info);
+				mvs_slot_complete(mvi, slot_no, 1);
+			}
+		}
+	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
+		task->task_proto & SAS_PROTOCOL_STP) {
+		/* TODO: free the register set */
+	} else {
+		/* SMP */
+
+	}
+out:
+	if (rc != TMF_RESP_FUNC_COMPLETE)
+		mv_printk("%s:rc= %d\n", __func__, rc);
+	return rc;
+}
+
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_tmf_task tmf_task;
+
+	tmf_task.tmf = TMF_ABORT_TASK_SET;
+	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+	return rc;
+}
+
+int mvs_clear_aca(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_tmf_task tmf_task;
+
+	tmf_task.tmf = TMF_CLEAR_ACA;
+	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+	return rc;
+}
+
+int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+	int rc = TMF_RESP_FUNC_FAILED;
+	struct mvs_tmf_task tmf_task;
+
+	tmf_task.tmf = TMF_CLEAR_TASK_SET;
+	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+	return rc;
+}
+
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+			u32 slot_idx, int err)
+{
+	struct mvs_device *mvi_dev = task->dev->lldd_dev;
+	struct task_status_struct *tstat = &task->task_status;
+	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+	int stat = SAM_GOOD;
+
+	resp->frame_len = sizeof(struct dev_to_host_fis);
+	memcpy(&resp->ending_fis[0],
+	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
+	       sizeof(struct dev_to_host_fis));
+	tstat->buf_valid_size = sizeof(*resp);
+	if (unlikely(err))
+		stat = SAS_PROTO_RESPONSE;
+	return stat;
+}
+
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+			u32 slot_idx)
+{
+	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+	int stat;
+	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+	u32 tfs = 0;
+	enum mvs_port_type type = PORT_TYPE_SAS;
+
+	if (err_dw0 & CMD_ISS_STPD)
+		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
+
+	MVS_CHIP_DISP->command_active(mvi, slot_idx);
+
+	stat = SAM_CHECK_COND;
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		stat = SAS_ABORTED_TASK;
+		break;
+	case SAS_PROTOCOL_SMP:
+		stat = SAM_CHECK_COND;
+		break;
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+	{
+		if (err_dw0 == 0x80400002)
+			mv_printk("unexpected reserved error 0x80400002\n");
+
+		task->ata_task.use_ncq = 0;
+		stat = SAS_PROTO_RESPONSE;
+		mvs_sata_done(mvi, task, slot_idx, 1);
+
+	}
+		break;
+	default:
+		break;
+	}
+
+	return stat;
+}
+
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
+{
+	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+	struct sas_task *task = slot->task;
+	struct mvs_device *mvi_dev = NULL;
+	struct task_status_struct *tstat;
+
+	bool aborted;
+	void *to;
+	enum exec_status sts;
+
+	if (mvi->exp_req)
+		mvi->exp_req--;
+	if (unlikely(!task || !task->lldd_task))
+		return -1;
+
+	tstat = &task->task_status;
+	mvi_dev = task->dev->lldd_dev;
+
+	mvs_hba_cq_dump(mvi);
+
+	spin_lock(&task->task_state_lock);
+	task->task_state_flags &=
+		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+	task->task_state_flags |= SAS_TASK_STATE_DONE;
+	/* race condition */
+	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+	spin_unlock(&task->task_state_lock);
+
+	memset(tstat, 0, sizeof(*tstat));
+	tstat->resp = SAS_TASK_COMPLETE;
+
+	if (unlikely(aborted)) {
+		tstat->stat = SAS_ABORTED_TASK;
+		if (mvi_dev)
+			mvi_dev->runing_req--;
+		if (sas_protocol_ata(task->task_proto))
+			mvs_free_reg_set(mvi, mvi_dev);
+
+		mvs_slot_task_free(mvi, task, slot, slot_idx);
+		return -1;
+	}
+
+	if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
+		mv_dprintk("port has no device.\n");
+		tstat->stat = SAS_PHY_DOWN;
+		goto out;
+	}
+
+	/*
+	if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
+		mv_dprintk("Find device[%016llx] RXQ_ERR %X,
+			err info:%016llx\n",
+			SAS_ADDR(task->dev->sas_addr),
+			rx_desc, (u64)(*(u64 *) slot->response));
+	}
+	*/
+
+	/* error info record present */
+	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+		goto out;
+	}
+
+	switch (task->task_proto) {
+	case SAS_PROTOCOL_SSP:
+		/* hw says status == 0, datapres == 0 */
+		if (rx_desc & RXQ_GOOD) {
+			tstat->stat = SAM_GOOD;
+			tstat->resp = SAS_TASK_COMPLETE;
+		}
+		/* response frame present */
+		else if (rx_desc & RXQ_RSP) {
+			struct ssp_response_iu *iu = slot->response +
+					sizeof(struct mvs_err_info);
+			sas_ssp_task_response(mvi->dev, task, iu);
+		} else
+			tstat->stat = SAM_CHECK_COND;
+		break;
+
+	case SAS_PROTOCOL_SMP: {
+			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+			tstat->stat = SAM_GOOD;
+			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+			memcpy(to + sg_resp->offset,
+				slot->response + sizeof(struct mvs_err_info),
+				sg_dma_len(sg_resp));
+			kunmap_atomic(to, KM_IRQ0);
+			break;
+		}
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
+			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
+			break;
+		}
+
+	default:
+		tstat->stat = SAM_CHECK_COND;
+		break;
+	}
+
+out:
+	if (mvi_dev) {
+		mvi_dev->runing_req--;
+		if (sas_protocol_ata(task->task_proto))
+			mvs_free_reg_set(mvi, mvi_dev);
+	}
+	mvs_slot_task_free(mvi, task, slot, slot_idx);
+	sts = tstat->stat;
+
+	spin_unlock(&mvi->lock);
+	if (task->task_done)
+		task->task_done(task);
+	else
+		mv_dprintk("task_done callback is not set.\n");
+	spin_lock(&mvi->lock);
+
+	return sts;
+}
+
+void mvs_release_task(struct mvs_info *mvi,
+		int phy_no, struct domain_device *dev)
+{
+	int i = 0;
+	u32 slot_idx;
+	struct mvs_phy *phy;
+	struct mvs_port *port;
+	struct mvs_slot_info *slot, *slot2;
+
+	phy = &mvi->phy[phy_no];
+	port = phy->port;
+	if (!port)
+		return;
+
+	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
+		struct sas_task *task;
+		slot_idx = (u32) (slot - mvi->slot_info);
+		task = slot->task;
+
+		if (dev && task->dev != dev)
+			continue;
+
+		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
+			slot_idx, slot->slot_tag, task);
+
+		if (task->task_proto & SAS_PROTOCOL_SSP) {
+			mv_printk("attached with SSP task CDB[");
+			for (i = 0; i < 16; i++)
+				mv_printk(" %02x", task->ssp_task.cdb[i]);
+			mv_printk(" ]\n");
+		}
+
+		mvs_slot_complete(mvi, slot_idx, 1);
+	}
+}
+
+static void mvs_phy_disconnected(struct mvs_phy *phy)
+{
+	phy->phy_attached = 0;
+	phy->att_dev_info = 0;
+	phy->att_dev_sas_addr = 0;
+}
+
+static void mvs_work_queue(struct work_struct *work)
+{
+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct mvs_wq *mwq = container_of(dw,
+				struct mvs_wq, work_q);
+	struct mvs_info *mvi = mwq->mvi;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mvi->lock, flags);
+	if (mwq->handler & PHY_PLUG_EVENT) {
+		u32 phy_no = (unsigned long) mwq->data;
+		struct sas_ha_struct *sas_ha = mvi->sas;
+		struct mvs_phy *phy = &mvi->phy[phy_no];
+		struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+		if (phy->phy_event & PHY_PLUG_OUT) {
+			u32 tmp;
+			struct sas_identify_frame *id;
+			id = (struct sas_identify_frame *)phy->frame_rcvd;
+			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
+			phy->phy_event &= ~PHY_PLUG_OUT;
+			if (!(tmp & PHY_READY_MASK)) {
+				sas_phy_disconnected(sas_phy);
+				mvs_phy_disconnected(phy);
+				sas_ha->notify_phy_event(sas_phy,
+					PHYE_LOSS_OF_SIGNAL);
+				mv_dprintk("phy%d Removed Device\n", phy_no);
+			} else {
+				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+				mvs_update_phyinfo(mvi, phy_no, 1);
+				mvs_bytes_dmaed(mvi, phy_no);
+				mvs_port_notify_formed(sas_phy, 0);
+				mv_dprintk("phy%d Attached Device\n", phy_no);
+			}
+		}
+	}
+	list_del(&mwq->entry);
+	spin_unlock_irqrestore(&mvi->lock, flags);
+	kfree(mwq);
+}
+
+static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
+{
+	struct mvs_wq *mwq;
+	int ret = 0;
+
+	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
+	if (mwq) {
+		mwq->mvi = mvi;
+		mwq->data = data;
+		mwq->handler = handler;
+		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
+		list_add_tail(&mwq->entry, &mvi->wq_list);
+		schedule_delayed_work(&mwq->work_q, HZ * 2);
+	} else
+		ret = -ENOMEM;
+
+	return ret;
+}
+
+static void mvs_sig_time_out(unsigned long tphy)
+{
+	struct mvs_phy *phy = (struct mvs_phy *)tphy;
+	struct mvs_info *mvi = phy->mvi;
+	u8 phy_no;
+
+	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
+		if (&mvi->phy[phy_no] == phy) {
+			mv_dprintk("Get signature time out, reset phy %d\n",
+				phy_no+mvi->id*mvi->chip->n_phy);
+			MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
+		}
+	}
+}
+
+static void mvs_sig_remove_timer(struct mvs_phy *phy)
+{
+	if (phy->timer.function)
+		del_timer(&phy->timer);
+	phy->timer.function = NULL;
+}
+
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+{
+	u32 tmp;
+	struct sas_ha_struct *sas_ha = mvi->sas;
+	struct mvs_phy *phy = &mvi->phy[phy_no];
+	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
+	mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
+		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
+	mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
+		phy->irq_status);
+
+	/*
+	 * "events" carries the port-level event; we still need to check
+	 * the per-port interrupt status to see what actually happened.
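+	 * The handlers below cover STP decoding errors (PHYEV_DCDR_ERR),
+	 * hot unplug (PHYEV_POOF), SATA COMWAKE (PHYEV_COMWAKE), receipt
+	 * of a signature FIS or identify frame (PHYEV_SIG_FIS,
+	 * PHYEV_ID_DONE) and expander broadcast change (PHYEV_BROAD_CH).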
+ */ + + if (phy->irq_status & PHYEV_DCDR_ERR) + mv_dprintk("port %d STP decoding error.\n", + phy_no+mvi->id*mvi->chip->n_phy); + + if (phy->irq_status & PHYEV_POOF) { + if (!(phy->phy_event & PHY_PLUG_OUT)) { + int dev_sata = phy->phy_type & PORT_TYPE_SATA; + int ready; + mvs_release_task(mvi, phy_no, NULL); + phy->phy_event |= PHY_PLUG_OUT; + mvs_handle_event(mvi, + (void *)(unsigned long)phy_no, + PHY_PLUG_EVENT); + ready = mvs_is_phy_ready(mvi, phy_no); + if (!ready) + mv_dprintk("phy%d Unplug Notice\n", + phy_no + + mvi->id * mvi->chip->n_phy); + if (ready || dev_sata) { + if (MVS_CHIP_DISP->stp_reset) + MVS_CHIP_DISP->stp_reset(mvi, + phy_no); + else + MVS_CHIP_DISP->phy_reset(mvi, + phy_no, 0); + return; + } + } + } + + if (phy->irq_status & PHYEV_COMWAKE) { + tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); + MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, + tmp | PHYEV_SIG_FIS); + if (phy->timer.function == NULL) { + phy->timer.data = (unsigned long)phy; + phy->timer.function = mvs_sig_time_out; + phy->timer.expires = jiffies + 10*HZ; + add_timer(&phy->timer); + } + } + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); + mvs_sig_remove_timer(phy); + mv_dprintk("notify plug in on phy[%d]\n", phy_no); + if (phy->phy_status) { + mdelay(10); + MVS_CHIP_DISP->detect_porttype(mvi, phy_no); + if (phy->phy_type & PORT_TYPE_SATA) { + tmp = MVS_CHIP_DISP->read_port_irq_mask( + mvi, phy_no); + tmp &= ~PHYEV_SIG_FIS; + MVS_CHIP_DISP->write_port_irq_mask(mvi, + phy_no, tmp); + } + mvs_update_phyinfo(mvi, phy_no, 0); + mvs_bytes_dmaed(mvi, phy_no); + /* whether driver is going to handle hot plug */ + if (phy->phy_event & PHY_PLUG_OUT) { + mvs_port_notify_formed(sas_phy, 0); + phy->phy_event &= ~PHY_PLUG_OUT; + } + } else { + mv_dprintk("plugin interrupt but phy%d is gone\n", + phy_no + mvi->id*mvi->chip->n_phy); + } + } else if (phy->irq_status & PHYEV_BROAD_CH) { + mv_dprintk("port %d broadcast change.\n", + phy_no + mvi->id*mvi->chip->n_phy); + /* exception for Samsung disk drive*/ + mdelay(1000); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + } + MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); +} + +int mvs_int_rx(struct mvs_info *mvi, bool self_clear) +{ + u32 rx_prod_idx, rx_desc; + bool attn = false; + + /* the first dword in the RX ring is special: it contains + * a mirror of the hardware's RX producer index, so that + * we don't have to stall the CPU reading that register. + * The actual RX ring is offset by one dword, due to this. 
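+	 * In other words, rx[0] is a DMA mirror of the hardware producer
+	 * index and rx[1..MVS_RX_RING_SZ] hold the descriptors; the loop
+	 * below advances the consumer until it catches up with the producer
+	 * snapshot (note that the local rx_prod_idx, despite its name,
+	 * walks the consumer side).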
+ */ + rx_prod_idx = mvi->rx_cons; + mvi->rx_cons = le32_to_cpu(mvi->rx[0]); + if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ + return 0; + + /* The CMPL_Q may come late, read from register and try again + * note: if coalescing is enabled, + * it will need to read from register every time for sure + */ + if (unlikely(mvi->rx_cons == rx_prod_idx)) + mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; + + if (mvi->rx_cons == rx_prod_idx) + return 0; + + while (mvi->rx_cons != rx_prod_idx) { + /* increment our internal RX consumer pointer */ + rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); + rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); + + if (likely(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + if (rx_desc & RXQ_ATTN) { + attn = true; + } else if (rx_desc & RXQ_ERR) { + if (!(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + } else if (rx_desc & RXQ_SLOT_RESET) { + mvs_slot_free(mvi, rx_desc); + } + } + + if (attn && self_clear) + MVS_CHIP_DISP->int_full(mvi); + return 0; +} + diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h new file mode 100644 index 000000000000..aa2270af1bac --- /dev/null +++ b/drivers/scsi/mvsas/mv_sas.h @@ -0,0 +1,406 @@ +/* + * Marvell 88SE64xx/88SE94xx main function head file + * + * Copyright 2007 Red Hat, Inc. + * Copyright 2008 Marvell. <kewei@marvell.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + +#ifndef _MV_SAS_H_ +#define _MV_SAS_H_ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/types.h> +#include <linux/ctype.h> +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/vmalloc.h> +#include <scsi/libsas.h> +#include <scsi/scsi_tcq.h> +#include <scsi/sas_ata.h> +#include <linux/version.h> +#include "mv_defs.h" + +#define DRV_NAME "mvsas" +#define DRV_VERSION "0.8.2" +#define _MV_DUMP 0 +#define MVS_ID_NOT_MAPPED 0x7f +/* #define DISABLE_HOTPLUG_DMA_FIX */ +#define MAX_EXP_RUNNING_REQ 2 +#define WIDE_PORT_MAX_PHY 4 +#define MV_DISABLE_NCQ 0 +#define mv_printk(fmt, arg ...) \ + printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) +#ifdef MV_DEBUG +#define mv_dprintk(format, arg...) \ + printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg) +#else +#define mv_dprintk(format, arg...) 
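+/* with MV_DEBUG undefined, mv_dprintk() compiles away to nothing */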
+#endif +#define MV_MAX_U32 0xffffffff + +extern struct mvs_tgt_initiator mvs_tgt; +extern struct mvs_info *tgt_mvi; +extern const struct mvs_dispatch mvs_64xx_dispatch; +extern const struct mvs_dispatch mvs_94xx_dispatch; + +#define DEV_IS_EXPANDER(type) \ + ((type == EDGE_DEV) || (type == FANOUT_DEV)) + +#define bit(n) ((u32)1 << n) + +#define for_each_phy(__lseq_mask, __mc, __lseq) \ + for ((__mc) = (__lseq_mask), (__lseq) = 0; \ + (__mc) != 0 ; \ + (++__lseq), (__mc) >>= 1) + +#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f) +#define UNASSOC_D2H_FIS(id) \ + ((void *) mvi->rx_fis + 0x100 * id) +#define SATA_RECEIVED_FIS_LIST(reg_set) \ + ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set) +#define SATA_RECEIVED_SDB_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58) +#define SATA_RECEIVED_D2H_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40) +#define SATA_RECEIVED_PIO_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20) +#define SATA_RECEIVED_DMA_FIS(reg_set) \ + (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00) + +enum dev_status { + MVS_DEV_NORMAL = 0x0, + MVS_DEV_EH = 0x1, +}; + + +struct mvs_info; + +struct mvs_dispatch { + char *name; + int (*chip_init)(struct mvs_info *mvi); + int (*spi_init)(struct mvs_info *mvi); + int (*chip_ioremap)(struct mvs_info *mvi); + void (*chip_iounmap)(struct mvs_info *mvi); + irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat); + u32 (*isr_status)(struct mvs_info *mvi, int irq); + void (*interrupt_enable)(struct mvs_info *mvi); + void (*interrupt_disable)(struct mvs_info *mvi); + + u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port); + void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val); + + u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port); + void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val); + void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr); + + u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port); + void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val); + void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr); + + u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port); + void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val); + + u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); + void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); + + void (*get_sas_addr)(void *buf, u32 buflen); + void (*command_active)(struct mvs_info *mvi, u32 slot_idx); + void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, + u32 tfs); + void (*start_delivery)(struct mvs_info *mvi, u32 tx); + u32 (*rx_update)(struct mvs_info *mvi); + void (*int_full)(struct mvs_info *mvi); + u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs); + void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs); + u32 (*prd_size)(void); + u32 (*prd_count)(void); + void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); + void (*detect_porttype)(struct mvs_info *mvi, int i); + int (*oob_done)(struct mvs_info *mvi, int i); + void (*fix_phy_info)(struct mvs_info *mvi, int i, + struct sas_identify_frame *id); + void (*phy_work_around)(struct mvs_info *mvi, int i); + void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id, + struct sas_phy_linkrates *rates); + u32 (*phy_max_link_rate)(void); + void (*phy_disable)(struct mvs_info *mvi, u32 phy_id); + void (*phy_enable)(struct mvs_info *mvi, u32 phy_id); + void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard); + void (*stp_reset)(struct mvs_info *mvi, 
u32 phy_id); + void (*clear_active_cmds)(struct mvs_info *mvi); + u32 (*spi_read_data)(struct mvs_info *mvi); + void (*spi_write_data)(struct mvs_info *mvi, u32 data); + int (*spi_buildcmd)(struct mvs_info *mvi, + u32 *dwCmd, + u8 cmd, + u8 read, + u8 length, + u32 addr + ); + int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); + int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); +#ifndef DISABLE_HOTPLUG_DMA_FIX + void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd); +#endif + +}; + +struct mvs_chip_info { + u32 n_host; + u32 n_phy; + u32 fis_offs; + u32 fis_count; + u32 srs_sz; + u32 slot_width; + const struct mvs_dispatch *dispatch; +}; +#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) +#define MVS_RX_FISL_SZ \ + (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) +#define MVS_CHIP_DISP (mvi->chip->dispatch) + +struct mvs_err_info { + __le32 flags; + __le32 flags2; +}; + +struct mvs_cmd_hdr { + __le32 flags; /* PRD tbl len; SAS, SATA ctl */ + __le32 lens; /* cmd, max resp frame len */ + __le32 tags; /* targ port xfer tag; tag */ + __le32 data_len; /* data xfer len */ + __le64 cmd_tbl; /* command table address */ + __le64 open_frame; /* open addr frame address */ + __le64 status_buf; /* status buffer address */ + __le64 prd_tbl; /* PRD tbl address */ + __le32 reserved[4]; +}; + +struct mvs_port { + struct asd_sas_port sas_port; + u8 port_attached; + u8 wide_port_phymap; + struct list_head list; +}; + +struct mvs_phy { + struct mvs_info *mvi; + struct mvs_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct scsi_device *sdev; + struct timer_list timer; + u64 dev_sas_addr; + u64 att_dev_sas_addr; + u32 att_dev_info; + u32 dev_info; + u32 phy_type; + u32 phy_status; + u32 irq_status; + u32 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + u8 phy_mode; + u8 reserved[2]; + u32 phy_event; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; +}; + +struct mvs_device { + struct list_head dev_entry; + enum sas_dev_type dev_type; + struct mvs_info *mvi_info; + struct domain_device *sas_device; + u32 attached_phy; + u32 device_id; + u32 runing_req; + u8 taskfileset; + u8 dev_status; + u16 reserved; +}; + +struct mvs_slot_info { + struct list_head entry; + union { + struct sas_task *task; + void *tdata; + }; + u32 n_elem; + u32 tx; + u32 slot_tag; + + /* DMA buffer for storing cmd tbl, open addr frame, status buffer, + * and PRD table + */ + void *buf; + dma_addr_t buf_dma; +#if _MV_DUMP + u32 cmd_size; +#endif + void *response; + struct mvs_port *port; + struct mvs_device *device; + void *open_frame; +}; + +struct mvs_info { + unsigned long flags; + + /* host-wide lock */ + spinlock_t lock; + + /* our device */ + struct pci_dev *pdev; + struct device *dev; + + /* enhanced mode registers */ + void __iomem *regs; + + /* peripheral or soc registers */ + void __iomem *regs_ex; + u8 sas_addr[SAS_ADDR_SIZE]; + + /* SCSI/SAS glue */ + struct sas_ha_struct *sas; + struct Scsi_Host *shost; + + /* TX (delivery) DMA ring */ + __le32 *tx; + dma_addr_t tx_dma; + + /* cached next-producer idx */ + u32 tx_prod; + + /* RX (completion) DMA ring */ + __le32 *rx; + dma_addr_t rx_dma; + + /* RX consumer idx */ + u32 rx_cons; + + /* RX'd FIS area */ + __le32 *rx_fis; + dma_addr_t rx_fis_dma; + + /* DMA command header slots */ + struct mvs_cmd_hdr *slot; + dma_addr_t slot_dma; + + u32 chip_id; + const struct mvs_chip_info *chip; + + int tags_num; + DECLARE_BITMAP(tags, MVS_SLOTS); + /* further per-slot information */ + 
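+	/* per-phy and per-port state; the real per-slot array is the
+	 * zero-length slot_info[] member at the very end of this
+	 * structure, sized when the host is allocated */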
struct mvs_phy phy[MVS_MAX_PHYS]; + struct mvs_port port[MVS_MAX_PHYS]; + u32 irq; + u32 exp_req; + u32 id; + u64 sata_reg_set; + struct list_head *hba_list; + struct list_head soc_entry; + struct list_head wq_list; + unsigned long instance; + u16 flashid; + u32 flashsize; + u32 flashsectSize; + + void *addon; + struct mvs_device devices[MVS_MAX_DEVICES]; +#ifndef DISABLE_HOTPLUG_DMA_FIX + void *bulk_buffer; + dma_addr_t bulk_buffer_dma; +#define TRASH_BUCKET_SIZE 0x20000 +#endif + struct mvs_slot_info slot_info[0]; +}; + +struct mvs_prv_info{ + u8 n_host; + u8 n_phy; + u16 reserve; + struct mvs_info *mvi[2]; +}; + +struct mvs_wq { + struct delayed_work work_q; + struct mvs_info *mvi; + void *data; + int handler; + struct list_head entry; +}; + +struct mvs_task_exec_info { + struct sas_task *task; + struct mvs_cmd_hdr *hdr; + struct mvs_port *port; + u32 tag; + int n_elem; +}; + + +/******************** function prototype *********************/ +void mvs_get_sas_addr(void *buf, u32 buflen); +void mvs_tag_clear(struct mvs_info *mvi, u32 tag); +void mvs_tag_free(struct mvs_info *mvi, u32 tag); +void mvs_tag_set(struct mvs_info *mvi, unsigned int tag); +int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out); +void mvs_tag_init(struct mvs_info *mvi); +void mvs_iounmap(void __iomem *regs); +int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); +void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard); +int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, + u32 off_lo, u32 off_hi, u64 sas_addr); +int mvs_slave_alloc(struct scsi_device *scsi_dev); +int mvs_slave_configure(struct scsi_device *sdev); +void mvs_scan_start(struct Scsi_Host *shost); +int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); +int mvs_queue_command(struct sas_task *task, const int num, + gfp_t gfp_flags); +int mvs_abort_task(struct sas_task *task); +int mvs_abort_task_set(struct domain_device *dev, u8 *lun); +int mvs_clear_aca(struct domain_device *dev, u8 *lun); +int mvs_clear_task_set(struct domain_device *dev, u8 * lun); +void mvs_port_formed(struct asd_sas_phy *sas_phy); +void mvs_port_deformed(struct asd_sas_phy *sas_phy); +int mvs_dev_found(struct domain_device *dev); +void mvs_dev_gone(struct domain_device *dev); +int mvs_lu_reset(struct domain_device *dev, u8 *lun); +int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags); +int mvs_I_T_nexus_reset(struct domain_device *dev); +int mvs_query_task(struct sas_task *task); +void mvs_release_task(struct mvs_info *mvi, int phy_no, + struct domain_device *dev); +void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); +void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); +int mvs_int_rx(struct mvs_info *mvi, bool self_clear); +void mvs_hexdump(u32 size, u8 *data, u32 baseaddr); +#endif + diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild index 0e207aa67d16..5fd73d77c3af 100644 --- a/drivers/scsi/osd/Kbuild +++ b/drivers/scsi/osd/Kbuild @@ -11,31 +11,6 @@ # it under the terms of the GNU General Public License version 2 # -ifneq ($(OSD_INC),) -# we are built out-of-tree Kconfigure everything as on - -CONFIG_SCSI_OSD_INITIATOR=m -ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE - -CONFIG_SCSI_OSD_ULD=m -ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE - -# CONFIG_SCSI_OSD_DPRINT_SENSE = -# 0 - no print of errors -# 1 - print errors -# 2 - errors + warrnings 
-ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1 - -# Uncomment to turn debug on -# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG - -# if we are built out-of-tree and the hosting kernel has OSD headers -# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing -# this it will work. This might break in future kernels -LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE) - -endif - # libosd.ko - osd-initiator library libosd-y := osd_initiator.o obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile deleted file mode 100755 index d905344f83ba..000000000000 --- a/drivers/scsi/osd/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# -# Makefile for the OSD modules (out of tree) -# -# Copyright (C) 2008 Panasas Inc. All rights reserved. -# -# Authors: -# Boaz Harrosh <bharrosh@panasas.com> -# Benny Halevy <bhalevy@panasas.com> -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 -# -# This Makefile is used to call the kernel Makefile in case of an out-of-tree -# build. -# $KSRC should point to a Kernel source tree otherwise host's default is -# used. (eg. /lib/modules/`uname -r`/build) - -# include path for out-of-tree Headers -OSD_INC ?= `pwd`/../../../include - -# allow users to override these -# e.g. to compile for a kernel that you aren't currently running -KSRC ?= /lib/modules/$(shell uname -r)/build -KBUILD_OUTPUT ?= -ARCH ?= -V ?= 0 - -# this is the basic Kbuild out-of-tree invocation, with the M= option -KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V) - -all: libosd - -libosd: ; - $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules - -clean: - $(KBUILD_BASE) clean diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index 5776b2ab6b12..7a117c18114c 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) _osd_ver_desc(or)); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n", + OSD_INFO("VENDOR_IDENTIFICATION [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n", + OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n", + OSD_INFO("PRODUCT_MODEL [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n", + OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n", pFirst ? get_unaligned_be32(pFirst) : ~0U); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n", + OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n", (char *)pFirst); pFirst = get_attrs[a].val_ptr; - OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst); + OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst); a++; pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n", + OSD_INFO("TOTAL_CAPACITY [0x%llx]\n", pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n", + OSD_INFO("USED_CAPACITY [0x%llx]\n", pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n", + OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n", pFirst ? 
_LLU(get_unaligned_be64(pFirst)) : ~0ULL); if (a >= nelem) @@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) /* FIXME: Where are the time utilities */ pFirst = get_attrs[a++].val_ptr; - OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", + OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", ((char *)pFirst)[0], ((char *)pFirst)[1], ((char *)pFirst)[2], ((char *)pFirst)[3], ((char *)pFirst)[4], ((char *)pFirst)[5]); @@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, sid_dump, sizeof(sid_dump), true); - OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump); + OSD_INFO("OSD_SYSTEM_ID(%d)\n" + " [%s]\n", len, sid_dump); a++; } out: @@ -669,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or, __be16 action, const struct osd_obj_id *obj, osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem) { - struct request_queue *q = or->osd_dev->scsi_device->request_queue; + struct request_queue *q = osd_request_queue(or->osd_dev); u64 len = nelem * sizeof(osd_id) + sizeof(*list); struct bio *bio; @@ -778,16 +779,32 @@ EXPORT_SYMBOL(osd_req_remove_object); */ void osd_req_write(struct osd_request *or, - const struct osd_obj_id *obj, struct bio *bio, u64 offset) + const struct osd_obj_id *obj, u64 offset, + struct bio *bio, u64 len) { - _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size); + _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); WARN_ON(or->out.bio || or->out.total_bytes); - bio->bi_rw |= (1 << BIO_RW); + WARN_ON(0 == bio_rw_flagged(bio, BIO_RW)); or->out.bio = bio; - or->out.total_bytes = bio->bi_size; + or->out.total_bytes = len; } EXPORT_SYMBOL(osd_req_write); +int osd_req_write_kern(struct osd_request *or, + const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) +{ + struct request_queue *req_q = osd_request_queue(or->osd_dev); + struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ + osd_req_write(or, obj, offset, bio, len); + return 0; +} +EXPORT_SYMBOL(osd_req_write_kern); + /*TODO: void osd_req_append(struct osd_request *, const struct osd_obj_id *, struct bio *data_out); */ /*TODO: void osd_req_create_write(struct osd_request *, @@ -813,16 +830,31 @@ void osd_req_flush_object(struct osd_request *or, EXPORT_SYMBOL(osd_req_flush_object); void osd_req_read(struct osd_request *or, - const struct osd_obj_id *obj, struct bio *bio, u64 offset) + const struct osd_obj_id *obj, u64 offset, + struct bio *bio, u64 len) { - _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size); + _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); WARN_ON(or->in.bio || or->in.total_bytes); - bio->bi_rw &= ~(1 << BIO_RW); + WARN_ON(1 == bio_rw_flagged(bio, BIO_RW)); or->in.bio = bio; - or->in.total_bytes = bio->bi_size; + or->in.total_bytes = len; } EXPORT_SYMBOL(osd_req_read); +int osd_req_read_kern(struct osd_request *or, + const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) +{ + struct request_queue *req_q = osd_request_queue(or->osd_dev); + struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + osd_req_read(or, obj, offset, bio, len); + return 0; +} +EXPORT_SYMBOL(osd_req_read_kern); + void osd_req_get_attributes(struct osd_request *or, const struct osd_obj_id *obj) { @@ -1213,7 +1245,7 @@ static inline void 
osd_sec_parms_set_in_offset(bool is_v1, } static int _osd_req_finalize_data_integrity(struct osd_request *or, - bool has_in, bool has_out, const u8 *cap_key) + bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key) { struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); int ret; @@ -1228,8 +1260,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or, }; unsigned pad; - or->out_data_integ.data_bytes = cpu_to_be64( - or->out.bio ? or->out.bio->bi_size : 0); + or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes); or->out_data_integ.set_attributes_bytes = cpu_to_be64( or->set_attr.total_bytes); or->out_data_integ.get_attributes_bytes = cpu_to_be64( @@ -1306,6 +1337,8 @@ static int _init_blk_request(struct osd_request *or, or->request = req; req->cmd_type = REQ_TYPE_BLOCK_PC; + req->cmd_flags |= REQ_QUIET; + req->timeout = or->timeout; req->retries = or->retries; req->sense = or->sense; @@ -1339,6 +1372,7 @@ int osd_finalize_request(struct osd_request *or, { struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); bool has_in, has_out; + u64 out_data_bytes = or->out.total_bytes; int ret; if (options & OSD_REQ_FUA) @@ -1388,7 +1422,8 @@ int osd_finalize_request(struct osd_request *or, } } - ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key); + ret = _osd_req_finalize_data_integrity(or, has_in, has_out, + out_data_bytes, cap_key); if (ret) return ret; diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index 22b59e13ba83..0bdef3390902 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -49,6 +49,7 @@ #include <linux/device.h> #include <linux/idr.h> #include <linux/major.h> +#include <linux/file.h> #include <scsi/scsi.h> #include <scsi/scsi_driver.h> @@ -175,10 +176,9 @@ static const struct file_operations osd_fops = { struct osd_dev *osduld_path_lookup(const char *name) { - struct path path; - struct inode *inode; - struct cdev *cdev; - struct osd_uld_device *uninitialized_var(oud); + struct osd_uld_device *oud; + struct osd_dev *od; + struct file *file; int error; if (!name || !*name) { @@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name) return ERR_PTR(-EINVAL); } - error = kern_path(name, LOOKUP_FOLLOW, &path); - if (error) { - OSD_ERR("path_lookup of %s failed=>%d\n", name, error); - return ERR_PTR(error); - } + od = kzalloc(sizeof(*od), GFP_KERNEL); + if (!od) + return ERR_PTR(-ENOMEM); - inode = path.dentry->d_inode; - error = -EINVAL; /* Not the right device e.g osd_uld_device */ - if (!S_ISCHR(inode->i_mode)) { - OSD_DEBUG("!S_ISCHR()\n"); - goto out; + file = filp_open(name, O_RDWR, 0); + if (IS_ERR(file)) { + error = PTR_ERR(file); + goto free_od; } - cdev = inode->i_cdev; - if (!cdev) { - OSD_ERR("Before mounting an OSD Based filesystem\n"); - OSD_ERR(" user-mode must open+close the %s device\n", name); - OSD_ERR(" Example: bash: echo < %s\n", name); - goto out; + if (file->f_op != &osd_fops){ + error = -EINVAL; + goto close_file; } - /* The Magic wand. Is it our char-dev */ - /* TODO: Support sg devices */ - if (cdev->owner != THIS_MODULE) { - OSD_ERR("Error mounting %s - is not an OSD device\n", name); - goto out; - } + oud = file->private_data; - oud = container_of(cdev, struct osd_uld_device, cdev); + *od = oud->od; + od->file = file; - __uld_get(oud); - error = 0; + return od; -out: - path_put(&path); - return error ? 
ERR_PTR(error) : &oud->od; +close_file: + fput(file); +free_od: + kfree(od); + return ERR_PTR(error); } EXPORT_SYMBOL(osduld_path_lookup); void osduld_put_device(struct osd_dev *od) { - if (od) { - struct osd_uld_device *oud = container_of(od, - struct osd_uld_device, od); - __uld_put(oud); + if (od && !IS_ERR(od)) { + struct osd_uld_device *oud = od->file->private_data; + + BUG_ON(od->scsi_device != oud->od.scsi_device); + + fput(od->file); + kfree(od); } } EXPORT_SYMBOL(osduld_put_device); diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 5defe5ea5eda..8371d917a9a2 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -17,9 +17,12 @@ * General Public License for more details. * ******************************************************************************/ -#define QLA1280_VERSION "3.26" +#define QLA1280_VERSION "3.27" /***************************************************************************** Revision History: + Rev 3.27, February 10, 2009, Michael Reed + - General code cleanup. + - Improve error recovery. Rev 3.26, January 16, 2006 Jes Sorensen - Ditch all < 2.6 support Rev 3.25.1, February 10, 2005 Christoph Hellwig @@ -435,7 +438,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *, uint8_t, uint16_t *); static int qla1280_bus_reset(struct scsi_qla_host *, int); static int qla1280_device_reset(struct scsi_qla_host *, int, int); -static int qla1280_abort_device(struct scsi_qla_host *, int, int, int); static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); static int qla1280_abort_isp(struct scsi_qla_host *); #ifdef QLA_64BIT_PTR @@ -698,7 +700,7 @@ qla1280_info(struct Scsi_Host *host) } /************************************************************************** - * qla1200_queuecommand + * qla1280_queuecommand * Queue a command to the controller. 
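 * The srb for a command lives inside the scsi_cmnd itself (CMD_SP);
 * sp->wait stays NULL unless the error handler later needs to wait on
 * the command, and CMD_HANDLE is NULL until the completion paths mark
 * it COMPLETED_HANDLE.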
* * Note: @@ -713,12 +715,14 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) { struct Scsi_Host *host = cmd->device->host; struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; - struct srb *sp = (struct srb *)&cmd->SCp; + struct srb *sp = (struct srb *)CMD_SP(cmd); int status; cmd->scsi_done = fn; sp->cmd = cmd; sp->flags = 0; + sp->wait = NULL; + CMD_HANDLE(cmd) = (unsigned char *)NULL; qla1280_print_scsi_cmd(5, cmd); @@ -738,21 +742,11 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) enum action { ABORT_COMMAND, - ABORT_DEVICE, DEVICE_RESET, BUS_RESET, ADAPTER_RESET, - FAIL }; -/* timer action for error action processor */ -static void qla1280_error_wait_timeout(unsigned long __data) -{ - struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data; - struct srb *sp = (struct srb *)CMD_SP(cmd); - - complete(sp->wait); -} static void qla1280_mailbox_timeout(unsigned long __data) { @@ -767,8 +761,67 @@ static void qla1280_mailbox_timeout(unsigned long __data) complete(ha->mailbox_wait); } +static int +_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp, + struct completion *wait) +{ + int status = FAILED; + struct scsi_cmnd *cmd = sp->cmd; + + spin_unlock_irq(ha->host->host_lock); + wait_for_completion_timeout(wait, 4*HZ); + spin_lock_irq(ha->host->host_lock); + sp->wait = NULL; + if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) { + status = SUCCESS; + (*cmd->scsi_done)(cmd); + } + return status; +} + +static int +qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp) +{ + DECLARE_COMPLETION_ONSTACK(wait); + + sp->wait = &wait; + return _qla1280_wait_for_single_command(ha, sp, &wait); +} + +static int +qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target) +{ + int cnt; + int status; + struct srb *sp; + struct scsi_cmnd *cmd; + + status = SUCCESS; + + /* + * Wait for all commands with the designated bus/target + * to be completed by the firmware + */ + for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { + sp = ha->outstanding_cmds[cnt]; + if (sp) { + cmd = sp->cmd; + + if (bus >= 0 && SCSI_BUS_32(cmd) != bus) + continue; + if (target >= 0 && SCSI_TCN_32(cmd) != target) + continue; + + status = qla1280_wait_for_single_command(ha, sp); + if (status == FAILED) + break; + } + } + return status; +} + /************************************************************************** - * qla1200_error_action + * qla1280_error_action * The function will attempt to perform a specified error action and * wait for the results (or time out). * @@ -780,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data) * Returns: * SUCCESS or FAILED * - * Note: - * Resetting the bus always succeeds - is has to, otherwise the - * kernel will panic! Try a surgical technique - sending a BUS - * DEVICE RESET message - on the offending target before pulling - * the SCSI bus reset line. 
**************************************************************************/ static int qla1280_error_action(struct scsi_cmnd *cmd, enum action action) @@ -792,13 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) struct scsi_qla_host *ha; int bus, target, lun; struct srb *sp; - uint16_t data; - unsigned char *handle; - int result, i; + int i, found; + int result=FAILED; + int wait_for_bus=-1; + int wait_for_target = -1; DECLARE_COMPLETION_ONSTACK(wait); - struct timer_list timer; + + ENTER("qla1280_error_action"); ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); + sp = (struct srb *)CMD_SP(cmd); + bus = SCSI_BUS_32(cmd); + target = SCSI_TCN_32(cmd); + lun = SCSI_LUN_32(cmd); dprintk(4, "error_action %i, istatus 0x%04x\n", action, RD_REG_WORD(&ha->iobase->istatus)); @@ -807,99 +861,47 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) RD_REG_WORD(&ha->iobase->host_cmd), RD_REG_WORD(&ha->iobase->ictrl), jiffies); - ENTER("qla1280_error_action"); if (qla1280_verbose) printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " "Handle=0x%p, action=0x%x\n", ha->host_no, cmd, CMD_HANDLE(cmd), action); - if (cmd == NULL) { - printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL " - "si_Cmnd pointer, failing.\n"); - LEAVE("qla1280_error_action"); - return FAILED; - } - - ha = (struct scsi_qla_host *)cmd->device->host->hostdata; - sp = (struct srb *)CMD_SP(cmd); - handle = CMD_HANDLE(cmd); - - /* Check for pending interrupts. */ - data = qla1280_debounce_register(&ha->iobase->istatus); - /* - * The io_request_lock is held when the reset handler is called, hence - * the interrupt handler cannot be running in parallel as it also - * grabs the lock. /Jes - */ - if (data & RISC_INT) - qla1280_isr(ha, &ha->done_q); - /* - * Determine the suggested action that the mid-level driver wants - * us to perform. + * Check to see if we have the command in the outstanding_cmds[] + * array. If not then it must have completed before this error + * action was initiated. If the error_action isn't ABORT_COMMAND + * then the driver must proceed with the requested action. */ - if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) { - if(action == ABORT_COMMAND) { - /* we never got this command */ - printk(KERN_INFO "qla1280: Aborting a NULL handle\n"); - return SUCCESS; /* no action - we don't have command */ + found = -1; + for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { + if (sp == ha->outstanding_cmds[i]) { + found = i; + sp->wait = &wait; /* we'll wait for it to complete */ + break; } - } else { - sp->wait = &wait; } - bus = SCSI_BUS_32(cmd); - target = SCSI_TCN_32(cmd); - lun = SCSI_LUN_32(cmd); + if (found < 0) { /* driver doesn't have command */ + result = SUCCESS; + if (qla1280_verbose) { + printk(KERN_INFO + "scsi(%ld:%d:%d:%d): specified command has " + "already completed.\n", ha->host_no, bus, + target, lun); + } + } - /* Overloading result. Here it means the success or fail of the - * *issue* of the action. 
When we return from the routine, it must - * mean the actual success or fail of the action */ - result = FAILED; switch (action) { - case FAIL: - break; case ABORT_COMMAND: - if ((sp->flags & SRB_ABORT_PENDING)) { - printk(KERN_WARNING - "scsi(): Command has a pending abort " - "message - ABORT_PENDING.\n"); - /* This should technically be impossible since we - * now wait for abort completion */ - break; - } - - for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { - if (sp == ha->outstanding_cmds[i]) { - dprintk(1, "qla1280: RISC aborting command\n"); - if (qla1280_abort_command(ha, sp, i) == 0) - result = SUCCESS; - else { - /* - * Since we don't know what might - * have happend to the command, it - * is unsafe to remove it from the - * device's queue at this point. - * Wait and let the escalation - * process take care of it. - */ - printk(KERN_WARNING - "scsi(%li:%i:%i:%i): Unable" - " to abort command!\n", - ha->host_no, bus, target, lun); - } - } - } - break; - - case ABORT_DEVICE: - if (qla1280_verbose) - printk(KERN_INFO - "scsi(%ld:%d:%d:%d): Queueing abort device " - "command.\n", ha->host_no, bus, target, lun); - if (qla1280_abort_device(ha, bus, target, lun) == 0) - result = SUCCESS; + dprintk(1, "qla1280: RISC aborting command\n"); + /* + * The abort might fail due to race when the host_lock + * is released to issue the abort. As such, we + * don't bother to check the return status. + */ + if (found >= 0) + qla1280_abort_command(ha, sp, found); break; case DEVICE_RESET: @@ -907,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) printk(KERN_INFO "scsi(%ld:%d:%d:%d): Queueing device reset " "command.\n", ha->host_no, bus, target, lun); - if (qla1280_device_reset(ha, bus, target) == 0) - result = SUCCESS; + if (qla1280_device_reset(ha, bus, target) == 0) { + /* issued device reset, set wait conditions */ + wait_for_bus = bus; + wait_for_target = target; + } break; case BUS_RESET: if (qla1280_verbose) printk(KERN_INFO "qla1280(%ld:%d): Issued bus " "reset.\n", ha->host_no, bus); - if (qla1280_bus_reset(ha, bus) == 0) - result = SUCCESS; + if (qla1280_bus_reset(ha, bus) == 0) { + /* issued bus reset, set wait conditions */ + wait_for_bus = bus; + } break; case ADAPTER_RESET: @@ -929,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) "continue automatically\n", ha->host_no); } ha->flags.reset_active = 1; - /* - * We restarted all of the commands automatically, so the - * mid-level code can expect completions momentitarily. - */ - if (qla1280_abort_isp(ha) == 0) - result = SUCCESS; + + if (qla1280_abort_isp(ha) != 0) { /* it's dead */ + result = FAILED; + } ha->flags.reset_active = 0; } - if (!list_empty(&ha->done_q)) - qla1280_done(ha); - - /* If we didn't manage to issue the action, or we have no - * command to wait for, exit here */ - if (result == FAILED || handle == NULL || - handle == (unsigned char *)INVALID_HANDLE) { - /* - * Clear completion queue to avoid qla1280_done() trying - * to complete the command at a later stage after we - * have exited the current context - */ - sp->wait = NULL; - goto leave; - } + /* + * At this point, the host_lock has been released and retaken + * by the issuance of the mailbox command. + * Wait for the command passed in by the mid-layer if it + * was found by the driver. It might have been returned + * between eh recovery steps, hence the check of the "found" + * variable. 
+ */ - /* set up a timer just in case we're really jammed */ - init_timer(&timer); - timer.expires = jiffies + 4*HZ; - timer.data = (unsigned long)cmd; - timer.function = qla1280_error_wait_timeout; - add_timer(&timer); + if (found >= 0) + result = _qla1280_wait_for_single_command(ha, sp, &wait); - /* wait for the action to complete (or the timer to expire) */ - spin_unlock_irq(ha->host->host_lock); - wait_for_completion(&wait); - del_timer_sync(&timer); - spin_lock_irq(ha->host->host_lock); - sp->wait = NULL; + if (action == ABORT_COMMAND && result != SUCCESS) { + printk(KERN_WARNING + "scsi(%li:%i:%i:%i): " + "Unable to abort command!\n", + ha->host_no, bus, target, lun); + } - /* the only action we might get a fail for is abort */ - if (action == ABORT_COMMAND) { - if(sp->flags & SRB_ABORTED) - result = SUCCESS; - else - result = FAILED; + /* + * If the command passed in by the mid-layer has been + * returned by the board, then wait for any additional + * commands which are supposed to complete based upon + * the error action. + * + * All commands are unconditionally returned during a + * call to qla1280_abort_isp(), ADAPTER_RESET. No need + * to wait for them. + */ + if (result == SUCCESS && wait_for_bus >= 0) { + result = qla1280_wait_for_pending_commands(ha, + wait_for_bus, wait_for_target); } - leave: dprintk(1, "RESET returning %d\n", result); LEAVE("qla1280_error_action"); @@ -1280,13 +1280,12 @@ qla1280_done(struct scsi_qla_host *ha) switch ((CMD_RESULT(cmd) >> 16)) { case DID_RESET: /* Issue marker command. */ - qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); + if (!ha->flags.abort_isp_active) + qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); break; case DID_ABORT: sp->flags &= ~SRB_ABORT_PENDING; sp->flags |= SRB_ABORTED; - if (sp->flags & SRB_TIMEOUT) - CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16; break; default: break; @@ -1296,12 +1295,11 @@ qla1280_done(struct scsi_qla_host *ha) scsi_dma_unmap(cmd); /* Call the mid-level driver interrupt handler */ - CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE; ha->actthreads--; - (*(cmd)->scsi_done)(cmd); - - if(sp->wait != NULL) + if (sp->wait == NULL) + (*(cmd)->scsi_done)(cmd); + else complete(sp->wait); } LEAVE("qla1280_done"); @@ -2417,9 +2415,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) { struct device_reg __iomem *reg = ha->iobase; -#if 0 - LIST_HEAD(done_q); -#endif int status = 0; int cnt; uint16_t *optr, *iptr; @@ -2493,19 +2488,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) mr = MAILBOX_REGISTER_COUNT; memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); -#if 0 - /* Go check for any response interrupts pending. */ - qla1280_isr(ha, &done_q); -#endif - if (ha->flags.reset_marker) qla1280_rst_aen(ha); -#if 0 - if (!list_empty(&done_q)) - qla1280_done(ha, &done_q); -#endif - if (status) dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " "0x%x ****\n", mb[0]); @@ -2641,41 +2626,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) } /* - * qla1280_abort_device - * Issue an abort message to the device - * - * Input: - * ha = adapter block pointer. - * bus = SCSI BUS. - * target = SCSI ID. - * lun = SCSI LUN. - * - * Returns: - * 0 = success - */ -static int -qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun) -{ - uint16_t mb[MAILBOX_REGISTER_COUNT]; - int status; - - ENTER("qla1280_abort_device"); - - mb[0] = MBC_ABORT_DEVICE; - mb[1] = (bus ? 
target | BIT_7 : target) << 8 | lun; - status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); - - /* Issue marker command. */ - qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN); - - if (status) - dprintk(2, "qla1280_abort_device: **** FAILED ****\n"); - - LEAVE("qla1280_abort_device"); - return status; -} - -/* * qla1280_abort_command * Abort command aborts a specified IOCB. * @@ -2833,7 +2783,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) /* If room for request in request ring. */ if ((req_cnt + 2) >= ha->req_q_cnt) { - status = 1; + status = SCSI_MLQUEUE_HOST_BUSY; dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, req_cnt); @@ -2845,7 +2795,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) ha->outstanding_cmds[cnt] != NULL; cnt++); if (cnt >= MAX_OUTSTANDING_COMMANDS) { - status = 1; + status = SCSI_MLQUEUE_HOST_BUSY; dprintk(2, "qla1280_start_scsi: NO ROOM IN " "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); goto out; @@ -3108,7 +3058,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) ha->req_q_cnt, seg_cnt); /* If room for request in request ring. */ if ((req_cnt + 2) >= ha->req_q_cnt) { - status = 1; + status = SCSI_MLQUEUE_HOST_BUSY; dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, req_cnt); @@ -3120,7 +3070,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) (ha->outstanding_cmds[cnt] != 0); cnt++) ; if (cnt >= MAX_OUTSTANDING_COMMANDS) { - status = 1; + status = SCSI_MLQUEUE_HOST_BUSY; dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); goto out; @@ -3487,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) /* Save ISP completion status */ CMD_RESULT(sp->cmd) = 0; + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; /* Place block on done queue */ list_add_tail(&sp->list, done_q); @@ -3495,7 +3446,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) * If we get here we have a real problem! */ printk(KERN_WARNING - "qla1280: ISP invalid handle"); + "qla1280: ISP invalid handle\n"); } } break; @@ -3753,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt, } } + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; + /* Place command on done queue. */ list_add_tail(&sp->list, done_q); out: @@ -3808,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt, CMD_RESULT(sp->cmd) = DID_ERROR << 16; } + CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; + /* Place command on done queue. 
*/ list_add_tail(&sp->list, done_q); } @@ -3858,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha) struct scsi_cmnd *cmd; sp = ha->outstanding_cmds[cnt]; if (sp) { - cmd = sp->cmd; CMD_RESULT(cmd) = DID_RESET << 16; - - sp->cmd = NULL; + CMD_HANDLE(cmd) = COMPLETED_HANDLE; ha->outstanding_cmds[cnt] = NULL; - - (*cmd->scsi_done)(cmd); - - sp->flags = 0; + list_add_tail(&sp->list, &ha->done_q); } } + qla1280_done(ha); + status = qla1280_load_firmware(ha); if (status) goto out; @@ -3955,13 +3907,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus) if (scsi_control == SCSI_PHASE_INVALID) { ha->bus_settings[bus].scsi_bus_dead = 1; -#if 0 - CMD_RESULT(cp) = DID_NO_CONNECT << 16; - CMD_HANDLE(cp) = INVALID_HANDLE; - /* ha->actthreads--; */ - - (*(cp)->scsi_done)(cp); -#endif return 1; /* bus is dead */ } else { ha->bus_settings[bus].scsi_bus_dead = 0; diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index d7c44b8d2b4f..834884b9eed5 100644 --- a/drivers/scsi/qla1280.h +++ b/drivers/scsi/qla1280.h @@ -88,7 +88,8 @@ /* Maximum outstanding commands in ISP queues */ #define MAX_OUTSTANDING_COMMANDS 512 -#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) +#define COMPLETED_HANDLE ((unsigned char *) \ + (MAX_OUTSTANDING_COMMANDS + 2)) /* ISP request and response entry counts (37-65535) */ #define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index b09993a06576..0f8796201504 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, return 0; if (IS_NOCACHE_VPD_TYPE(ha)) - ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2, + ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, ha->nvram_size); return memory_read_from_buffer(buf, count, &off, ha->nvram, ha->nvram_size); @@ -692,6 +692,109 @@ static struct bin_attribute sysfs_edc_status_attr = { .read = qla2x00_sysfs_read_edc_status, }; +static ssize_t +qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + int rval; + uint16_t actual_size; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) + return 0; + + if (ha->xgmac_data) + goto do_read; + + ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, + &ha->xgmac_data_dma, GFP_KERNEL); + if (!ha->xgmac_data) { + qla_printk(KERN_WARNING, ha, + "Unable to allocate memory for XGMAC read-data.\n"); + return 0; + } + +do_read: + actual_size = 0; + memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE); + + rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, + XGMAC_DATA_SIZE, &actual_size); + if (rval != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Unable to read XGMAC data (%x).\n", rval); + count = 0; + } + + count = actual_size > count ? 
count: actual_size; + memcpy(buf, ha->xgmac_data, count); + + return count; +} + +static struct bin_attribute sysfs_xgmac_stats_attr = { + .attr = { + .name = "xgmac_stats", + .mode = S_IRUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_xgmac_stats, +}; + +static ssize_t +qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, + struct device, kobj))); + struct qla_hw_data *ha = vha->hw; + int rval; + uint16_t actual_size; + + if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) + return 0; + + if (ha->dcbx_tlv) + goto do_read; + + ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, + &ha->dcbx_tlv_dma, GFP_KERNEL); + if (!ha->dcbx_tlv) { + qla_printk(KERN_WARNING, ha, + "Unable to allocate memory for DCBX TLV read-data.\n"); + return 0; + } + +do_read: + actual_size = 0; + memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE); + + rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, + DCBX_TLV_DATA_SIZE); + if (rval != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Unable to read DCBX TLV data (%x).\n", rval); + count = 0; + } + + memcpy(buf, ha->dcbx_tlv, count); + + return count; +} + +static struct bin_attribute sysfs_dcbx_tlv_attr = { + .attr = { + .name = "dcbx_tlv", + .mode = S_IRUSR, + }, + .size = 0, + .read = qla2x00_sysfs_read_dcbx_tlv, +}; + static struct sysfs_entry { char *name; struct bin_attribute *attr; @@ -706,6 +809,8 @@ static struct sysfs_entry { { "reset", &sysfs_reset_attr, }, { "edc", &sysfs_edc_attr, 2 }, { "edc_status", &sysfs_edc_status_attr, 2 }, + { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, + { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, { NULL }, }; @@ -721,6 +826,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) continue; + if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw)) + continue; ret = sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); @@ -743,6 +850,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) continue; + if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha)) + continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); @@ -1088,6 +1197,58 @@ qla2x00_flash_block_size_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); } +static ssize_t +qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_QLA81XX(vha->hw)) + return snprintf(buf, PAGE_SIZE, "\n"); + + return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); +} + +static ssize_t +qla2x00_vn_port_mac_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_QLA81XX(vha->hw)) + return snprintf(buf, PAGE_SIZE, "\n"); + + return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", + vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4], + vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2], + vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]); +} + +static ssize_t +qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); +} + +static ssize_t +qla2x00_fw_state_show(struct device *dev, struct device_attribute 
*attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int rval; + uint16_t state[5]; + + rval = qla2x00_get_firmware_state(vha, state); + if (rval != QLA_SUCCESS) + memset(state, -1, sizeof(state)); + + return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], + state[1], state[2], state[3], state[4]); +} + static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -1116,6 +1277,11 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, NULL); +static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); +static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, + qla2x00_vn_port_mac_address_show, NULL); +static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); +static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -1138,6 +1304,10 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_mpi_version, &dev_attr_phy_version, &dev_attr_flash_block_size, + &dev_attr_vlan_id, + &dev_attr_vn_port_mac_address, + &dev_attr_fabric_param, + &dev_attr_fw_state, NULL, }; @@ -1313,7 +1483,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) * At this point all fcport's software-states are cleared. Perform any * final cleanup of firmware resources (PCBs and XCBs). */ - if (fcport->loop_id != FC_NO_LOOP_ID) + if (fcport->loop_id != FC_NO_LOOP_ID && + !test_bit(UNLOADING, &fcport->vha->dpc_flags)) fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); @@ -1437,11 +1608,13 @@ static int qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) { int ret = 0; - int cnt = 0; - uint8_t qos = QLA_DEFAULT_QUE_QOS; + uint8_t qos = 0; scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); scsi_qla_host_t *vha = NULL; struct qla_hw_data *ha = base_vha->hw; + uint16_t options = 0; + int cnt; + struct req_que *req = ha->req_q_map[0]; ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret) { @@ -1497,23 +1670,39 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qla24xx_vport_disable(fc_vport, disable); - /* Create a queue pair for the vport */ - if (ha->mqenable) { - if (ha->npiv_info) { - for (; cnt < ha->nvram_npiv_size; cnt++) { - if (ha->npiv_info[cnt].port_name == - vha->port_name && - ha->npiv_info[cnt].node_name == - vha->node_name) { - qos = ha->npiv_info[cnt].q_qos; - break; - } - } + if (ql2xmultique_tag) { + req = ha->req_q_map[1]; + goto vport_queue; + } else if (ql2xmaxqueues == 1 || !ha->npiv_info) + goto vport_queue; + /* Create a request queue in QoS mode for the vport */ + for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) { + if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0 + && memcmp(ha->npiv_info[cnt].node_name, vha->node_name, + 8) == 0) { + qos = ha->npiv_info[cnt].q_qos; + break; + } + } + if (qos) { + ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, + qos); + if (!ret) + qla_printk(KERN_WARNING, ha, + "Can't create request queue for vp_idx:%d\n", + vha->vp_idx); + else { + DEBUG2(qla_printk(KERN_INFO, ha, + "Request Que:%d (QoS: %d) created for vp_idx:%d\n", + ret, qos, 
vha->vp_idx)); + req = ha->req_q_map[ret]; } - qla25xx_create_queues(vha, qos); } +vport_queue: + vha->req = req; return 0; + vport_create_failed_2: qla24xx_disable_vp(vha); qla24xx_deallocate_vp_id(vha); @@ -1554,8 +1743,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) vha->host_no, vha->vp_idx, vha)); } - if (ha->mqenable) { - if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) + if (vha->req->id && !ql2xmultique_tag) { + if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) qla_printk(KERN_WARNING, ha, "Queue delete failed.\n"); } diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 34760f8d4f17..4a990f4da4ea 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) int rval = QLA_SUCCESS; uint32_t cnt; - if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) - return rval; - WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); - for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 && + for (cnt = 30000; + ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); @@ -351,7 +349,7 @@ static inline void * qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { uint32_t cnt, que_idx; - uint8_t req_cnt, rsp_cnt, que_cnt; + uint8_t que_cnt; struct qla2xxx_mq_chain *mq = ptr; struct device_reg_25xxmq __iomem *reg; @@ -363,9 +361,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) mq->type = __constant_htonl(DUMP_CHAIN_MQ); mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); - req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); - rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); - que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt; + que_cnt = ha->max_req_queues > ha->max_rsp_queues ? + ha->max_req_queues : ha->max_rsp_queues; mq->count = htonl(que_cnt); for (cnt = 0; cnt < que_cnt; cnt++) { reg = (struct device_reg_25xxmq *) ((void *) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 714ee67567e1..00aa48d975a6 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -93,6 +93,7 @@ #define LSD(x) ((uint32_t)((uint64_t)(x))) #define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) +#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y))) /* * I/O register @@ -179,6 +180,7 @@ #define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. 
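
The MAKE_HANDLE() macro added to qla_def.h above is the heart of the multiqueue handle scheme: the request-queue id travels in the upper 16 bits of the 32-bit IOCB handle and the per-queue command slot in the lower 16, which is exactly what the status-entry code later in this patch unpacks with LSW()/MSW(). A stand-alone demo of the round trip (LSW/MSW reproduced to match the driver's helpers):

	#include <stdio.h>
	#include <stdint.h>

	#define MAKE_HANDLE(x, y) \
		((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
	#define LSW(x) ((uint16_t)(x))			/* command slot */
	#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))	/* request queue id */

	int main(void)
	{
		uint32_t handle = MAKE_HANDLE(3, 42);	/* queue 3, slot 42 */

		printf("handle=0x%08x queue=%u slot=%u\n",
		       handle, MSW(handle), LSW(handle));
		return 0;
	}
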
*/ #define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ +#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ struct req_que; @@ -186,7 +188,6 @@ struct req_que; * SCSI Request Block */ typedef struct srb { - struct req_que *que; struct fc_port *fcport; struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ @@ -2008,7 +2009,7 @@ typedef struct vport_params { #define VP_RET_CODE_NOT_FOUND 6 struct qla_hw_data; - +struct rsp_que; /* * ISP operations */ @@ -2030,10 +2031,9 @@ struct isp_operations { void (*enable_intrs) (struct qla_hw_data *); void (*disable_intrs) (struct qla_hw_data *); - int (*abort_command) (struct scsi_qla_host *, srb_t *, - struct req_que *); - int (*target_reset) (struct fc_port *, unsigned int); - int (*lun_reset) (struct fc_port *, unsigned int); + int (*abort_command) (srb_t *); + int (*target_reset) (struct fc_port *, unsigned int, int); + int (*lun_reset) (struct fc_port *, unsigned int, int); int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, uint8_t, uint8_t, uint16_t *, uint8_t); int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, @@ -2079,7 +2079,6 @@ struct isp_operations { #define QLA_PCI_MSIX_CONTROL 0xa2 struct scsi_qla_host; -struct rsp_que; struct qla_msix_entry { int have_irq; @@ -2140,7 +2139,6 @@ struct qla_statistics { #define MBC_INITIALIZE_MULTIQ 0x1f #define QLA_QUE_PAGE 0X1000 #define QLA_MQ_SIZE 32 -#define QLA_MAX_HOST_QUES 16 #define QLA_MAX_QUEUES 256 #define ISP_QUE_REG(ha, id) \ ((ha->mqenable) ? \ @@ -2170,6 +2168,8 @@ struct rsp_que { struct qla_hw_data *hw; struct qla_msix_entry *msix; struct req_que *req; + srb_t *status_srb; /* status continuation entry */ + struct work_struct q_work; }; /* Request queue data structure */ @@ -2222,6 +2222,8 @@ struct qla_hw_data { uint32_t fce_enabled :1; uint32_t fac_supported :1; uint32_t chip_reset_done :1; + uint32_t port0 :1; + uint32_t running_gold_fw :1; } flags; /* This spinlock is used to protect "io transactions", you must @@ -2246,7 +2248,8 @@ struct qla_hw_data { struct rsp_que **rsp_q_map; unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; - uint16_t max_queues; + uint8_t max_req_queues; + uint8_t max_rsp_queues; struct qla_npiv_entry *npiv_info; uint16_t nvram_npiv_size; @@ -2255,6 +2258,9 @@ struct qla_hw_data { #define FLOGI_MID_SUPPORT BIT_10 #define FLOGI_VSAN_SUPPORT BIT_12 #define FLOGI_SP_SUPPORT BIT_13 + + uint8_t port_no; /* Physical port of adapter */ + /* Timeout timers. 
*/ uint8_t loop_down_abort_time; /* port down timer */ atomic_t loop_down_timer; /* loop down timer */ @@ -2392,6 +2398,14 @@ struct qla_hw_data { dma_addr_t edc_data_dma; uint16_t edc_data_len; +#define XGMAC_DATA_SIZE PAGE_SIZE + void *xgmac_data; + dma_addr_t xgmac_data_dma; + +#define DCBX_TLV_DATA_SIZE PAGE_SIZE + void *dcbx_tlv; + dma_addr_t dcbx_tlv_dma; + struct task_struct *dpc_thread; uint8_t dpc_active; /* DPC routine is active */ @@ -2510,6 +2524,7 @@ struct qla_hw_data { uint32_t flt_region_vpd; uint32_t flt_region_nvram; uint32_t flt_region_npiv_conf; + uint32_t flt_region_gold_fw; /* Needed for BEACON */ uint16_t beacon_blink_led; @@ -2536,6 +2551,7 @@ struct qla_hw_data { struct qla_chip_state_84xx *cs84xx; struct qla_statistics qla_stats; struct isp_operations *isp_ops; + struct workqueue_struct *wq; }; /* @@ -2545,6 +2561,8 @@ typedef struct scsi_qla_host { struct list_head list; struct list_head vp_fcports; /* list of fcports */ struct list_head work_list; + spinlock_t work_lock; + /* Commonly used flags and state information. */ struct Scsi_Host *host; unsigned long host_no; @@ -2591,8 +2609,6 @@ typedef struct scsi_qla_host { #define SWITCH_FOUND BIT_0 #define DFLG_NO_CABLE BIT_1 - srb_t *status_srb; /* Status continuation entry. */ - /* ISP configuration data. */ uint16_t loop_id; /* Host adapter loop id */ @@ -2618,6 +2634,11 @@ typedef struct scsi_qla_host { uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; uint8_t fabric_node_name[WWN_SIZE]; + + uint16_t fcoe_vlan_id; + uint16_t fcoe_fcf_idx; + uint8_t fcoe_vn_port_mac[6]; + uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ @@ -2643,7 +2664,7 @@ typedef struct scsi_qla_host { #define VP_ERR_FAB_LOGOUT 4 #define VP_ERR_ADAP_NORESOURCES 5 struct qla_hw_data *hw; - int req_ques[QLA_MAX_HOST_QUES]; + struct req_que *req; } scsi_qla_host_t; /* diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 96ccb9642ba0..dfde2dd865cb 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -878,7 +878,6 @@ struct device_reg_24xx { /* HCCR statuses. */ #define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ #define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ -#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */ /* HCCR commands. */ /* NOOP. */ #define HCCRX_NOOP 0x00000000 @@ -1241,6 +1240,7 @@ struct qla_flt_header { #define FLT_REG_HW_EVENT_1 0x1f #define FLT_REG_NPIV_CONF_0 0x29 #define FLT_REG_NPIV_CONF_1 0x2a +#define FLT_REG_GOLD_FW 0x2f struct qla_flt_region { uint32_t code; @@ -1405,6 +1405,8 @@ struct access_chip_rsp_84xx { #define MBC_IDC_ACK 0x101 #define MBC_RESTART_MPI_FW 0x3d #define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. 
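
Two structural moves above work together: struct srb loses its que back-pointer, and each scsi_qla_host now carries a single struct req_que *req, so everything a recovery path needs is reachable from the srb itself. That is what lets the abort_command()/target_reset()/lun_reset() prototypes later in this patch shrink. A hypothetical, compilable sketch of the pointer chain (stand-in types; the field names are this patch's):

	struct example_req;					/* stands in for struct req_que */
	struct example_host { struct example_req *req; };	/* scsi_qla_host */
	struct example_port { struct example_host *vha; };	/* fc_port */
	struct example_srb  { struct example_port *fcport; };	/* srb */

	static inline struct example_host *srb_to_host(struct example_srb *sp)
	{
		return sp->fcport->vha;			/* host context */
	}

	static inline struct example_req *srb_to_req(struct example_srb *sp)
	{
		return sp->fcport->vha->req;		/* per-host request queue */
	}
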
*/ +#define MBC_GET_XGMAC_STATS 0x7a +#define MBC_GET_DCBX_PARAMS 0x51 /* Flash access control option field bit definitions */ #define FAC_OPT_FORCE_SEMAPHORE BIT_15 @@ -1711,7 +1713,7 @@ struct ex_init_cb_81xx { #define FA_VPD0_ADDR_81 0xD0000 #define FA_VPD1_ADDR_81 0xD0400 #define FA_NVRAM0_ADDR_81 0xD0080 -#define FA_NVRAM1_ADDR_81 0xD0480 +#define FA_NVRAM1_ADDR_81 0xD0180 #define FA_FEATURE_ADDR_81 0xD4000 #define FA_FLASH_DESCR_ADDR_81 0xD8000 #define FA_FLASH_LAYOUT_ADDR_81 0xD8400 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 528913f6bed9..65b12d82867c 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -65,8 +65,11 @@ extern int ql2xfdmienable; extern int ql2xallocfwdump; extern int ql2xextended_error_logging; extern int ql2xqfullrampup; +extern int ql2xqfulltracking; extern int ql2xiidmaenable; extern int ql2xmaxqueues; +extern int ql2xmultique_tag; +extern int ql2xfwloadbin; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -145,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); extern int qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); -extern void +extern int qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); @@ -165,13 +168,13 @@ extern int qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); extern int -qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); +qla2x00_abort_command(srb_t *); extern int -qla2x00_abort_target(struct fc_port *, unsigned int); +qla2x00_abort_target(struct fc_port *, unsigned int, int); extern int -qla2x00_lun_reset(struct fc_port *, unsigned int); +qla2x00_lun_reset(struct fc_port *, unsigned int, int); extern int qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, @@ -236,9 +239,11 @@ extern int qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, dma_addr_t); -extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); -extern int qla24xx_abort_target(struct fc_port *, unsigned int); -extern int qla24xx_lun_reset(struct fc_port *, unsigned int); +extern int qla24xx_abort_command(srb_t *); +extern int +qla24xx_abort_target(struct fc_port *, unsigned int, int); +extern int +qla24xx_lun_reset(struct fc_port *, unsigned int, int); extern int qla2x00_system_error(scsi_qla_host_t *); @@ -288,6 +293,18 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int); extern int qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); +extern int +qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *); + +extern int +qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t); + +extern int +qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *); + +extern int +qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); + /* * Global Function Prototypes in qla_isr.c source file. 
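
The new MBC_GET_XGMAC_STATS and MBC_GET_DCBX_PARAMS opcodes pair with the sysfs readers added earlier in this patch, which allocate one PAGE_SIZE coherent DMA buffer on the first read and then reuse it for every subsequent read. A minimal sketch of that allocate-once pattern, assuming a generic struct device (the helper name is illustrative):

	#include <linux/dma-mapping.h>

	/* Lazily allocate a coherent DMA buffer; reuse it on later reads. */
	static void *example_read_buf(struct device *dev, void **buf,
				      dma_addr_t *buf_dma, size_t size)
	{
		if (*buf)		/* already allocated by a prior read */
			return *buf;

		*buf = dma_alloc_coherent(dev, size, buf_dma, GFP_KERNEL);
		return *buf;		/* NULL on allocation failure */
	}
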
*/ @@ -295,8 +312,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *); extern irqreturn_t qla2300_intr_handler(int, void *); extern irqreturn_t qla24xx_intr_handler(int, void *); extern void qla2x00_process_response_queue(struct rsp_que *); -extern void qla24xx_process_response_queue(struct rsp_que *); - +extern void +qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *); extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); extern void qla2x00_free_irqs(scsi_qla_host_t *); @@ -401,19 +418,21 @@ extern int qla25xx_request_irq(struct rsp_que *); extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t, uint8_t, uint8_t); + uint16_t, int, uint8_t); extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t); + uint16_t, int); extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); extern void qla2x00_init_response_q_entries(struct rsp_que *); extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); -extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); +extern int qla25xx_delete_queues(struct scsi_qla_host *); extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); +extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 557f58d5bf88..917534b9f221 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) return ret; ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, - mb, BIT_1); + mb, BIT_1|BIT_0); if (mb[0] != MBS_COMMAND_COMPLETE) { DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", @@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) case BIT_13: list[i].fp_speed = PORT_SPEED_4GB; break; + case BIT_12: + list[i].fp_speed = PORT_SPEED_10GB; + break; case BIT_11: list[i].fp_speed = PORT_SPEED_8GB; break; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index bd7dd84c0648..262026129325 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) goto chip_diag_failed; DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", - ha->host_no)); + vha->host_no)); /* Reset RISC processor. 
*/ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); @@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) goto chip_diag_failed; /* Check product ID of chip */ - DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no)); + DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); @@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; - /* Perform RISC reset. */ - qla24xx_reset_risc(vha); - ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; rval = qla2x00_mbx_reg_test(vha); @@ -786,7 +783,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) sizeof(uint32_t); if (ha->mqenable) mq_size = sizeof(struct qla2xxx_mq_chain); - /* Allocate memory for Fibre Channel Event Buffer. */ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) goto try_eft; @@ -850,8 +846,7 @@ cont_alloc: rsp_q_size = rsp->length * sizeof(response_t); dump_size = offsetof(struct qla2xxx_fw_dump, isp); - dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + - eft_size; + dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; ha->chain_offset = dump_size; dump_size += mq_size + fce_size; @@ -891,6 +886,56 @@ cont_alloc: htonl(offsetof(struct qla2xxx_fw_dump, isp)); } +static int +qla81xx_mpi_sync(scsi_qla_host_t *vha) +{ +#define MPS_MASK 0xe0 + int rval; + uint16_t dc; + uint32_t dw; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(vha->hw)) + return QLA_SUCCESS; + + rval = qla2x00_write_ram_word(vha, 0x7c00, 1); + if (rval != QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Sync-MPI: Unable to acquire semaphore.\n")); + goto done; + } + + pci_read_config_word(vha->hw->pdev, 0x54, &dc); + rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); + if (rval != QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Sync-MPI: Unable to read sync.\n")); + goto done_release; + } + + dc &= MPS_MASK; + if (dc == (dw & MPS_MASK)) + goto done_release; + + dw &= ~MPS_MASK; + dw |= dc; + rval = qla2x00_write_ram_word(vha, 0x7a15, dw); + if (rval != QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Sync-MPI: Unable to gain sync.\n")); + } + +done_release: + rval = qla2x00_write_ram_word(vha, 0x7c00, 0); + if (rval != QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Sync-MPI: Unable to release semaphore.\n")); + } + +done: + return rval; +} + /** * qla2x00_setup_chip() - Load and start RISC firmware. * @ha: HA context @@ -915,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->hardware_lock, flags); } + qla81xx_mpi_sync(vha); + /* Load firmware sequences */ rval = ha->isp_ops->load_risc(vha, &srisc_address); if (rval == QLA_SUCCESS) { @@ -931,13 +978,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) /* Retrieve firmware information. 
*/ if (rval == QLA_SUCCESS) { fw_major_version = ha->fw_major_version; - qla2x00_get_fw_version(vha, + rval = qla2x00_get_fw_version(vha, &ha->fw_major_version, &ha->fw_minor_version, &ha->fw_subminor_version, &ha->fw_attributes, &ha->fw_memory_size, ha->mpi_version, &ha->mpi_capabilities, ha->phy_version); + if (rval != QLA_SUCCESS) + goto failed; + ha->flags.npiv_supported = 0; if (IS_QLA2XXX_MIDTYPE(ha) && (ha->fw_attributes & BIT_2)) { @@ -989,7 +1039,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) ha->fw_subminor_version); } } - +failed: if (rval) { DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", vha->host_no)); @@ -1013,12 +1063,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp) uint16_t cnt; response_t *pkt; + rsp->ring_ptr = rsp->ring; + rsp->ring_index = 0; + rsp->status_srb = NULL; pkt = rsp->ring_ptr; for (cnt = 0; cnt < rsp->length; cnt++) { pkt->signature = RESPONSE_PROCESSED; pkt++; } - } /** @@ -1176,7 +1228,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) if (ha->flags.msix_enabled) { msix = &ha->msix_entries[1]; DEBUG2_17(printk(KERN_INFO - "Reistering vector 0x%x for base que\n", msix->entry)); + "Registering vector 0x%x for base que\n", msix->entry)); icb->msix = cpu_to_le16(msix->entry); } /* Use alternate PCI bus number */ @@ -1230,14 +1282,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->hardware_lock, flags); /* Clear outstanding commands array. */ - for (que = 0; que < ha->max_queues; que++) { + for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; - for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) req->outstanding_cmds[cnt] = NULL; - req->current_outstanding_cmd = 0; + req->current_outstanding_cmd = 1; /* Initialize firmware. 
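
Note the deliberate off-by-one in qla2x00_init_rings() above: the outstanding-commands array is cleared from index 1 and current_outstanding_cmd starts at 1, so slot 0 is never handed out. Once MAKE_HANDLE() packs the queue id into the same word, a handle of 0 can then safely mean "no command". A stand-alone sketch of such an allocator (wrap-to-1 behavior assumed; not the driver's exact loop):

	#include <stdio.h>

	#define MAX_OUTSTANDING_COMMANDS 512

	static void *outstanding[MAX_OUTSTANDING_COMMANDS];

	static unsigned int next_handle(unsigned int cur)
	{
		unsigned int cnt, handle = cur;

		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
			handle++;
			if (handle == MAX_OUTSTANDING_COMMANDS)
				handle = 1;	/* wrap past reserved slot 0 */
			if (outstanding[handle] == NULL)
				return handle;
		}
		return 0;			/* 0 == ring full / no command */
	}

	int main(void)
	{
		printf("first handle: %u\n", next_handle(1));
		return 0;
	}
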
*/ req->ring_ptr = req->ring; @@ -1245,13 +1297,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha) req->cnt = req->length; } - for (que = 0; que < ha->max_queues; que++) { + for (que = 0; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; if (!rsp) continue; - rsp->ring_ptr = rsp->ring; - rsp->ring_index = 0; - /* Initialize response queue entries */ qla2x00_init_response_q_entries(rsp); } @@ -1307,7 +1356,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) unsigned long wtime, mtime, cs84xx_time; uint16_t min_wait; /* Minimum wait time if loop is down */ uint16_t wait_time; /* Wait time if loop is coming ready */ - uint16_t state[3]; + uint16_t state[5]; struct qla_hw_data *ha = vha->hw; rval = QLA_SUCCESS; @@ -1406,8 +1455,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) vha->host_no, state[0], jiffies)); } while (1); - DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", - vha->host_no, state[0], jiffies)); + DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", + vha->host_no, state[0], state[1], state[2], state[3], state[4], + jiffies)); if (rval) { DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", @@ -1541,6 +1591,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, char *st, *en; uint16_t index; struct qla_hw_data *ha = vha->hw; + int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha); if (memcmp(model, BINZERO, len) != 0) { strncpy(ha->model_number, model, len); @@ -1553,14 +1604,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, } index = (ha->pdev->subsystem_device & 0xff); - if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + if (use_tbl && + ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) strncpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], sizeof(ha->model_desc) - 1); } else { index = (ha->pdev->subsystem_device & 0xff); - if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + if (use_tbl && + ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) { strcpy(ha->model_number, qla2x00_model_name[index * 2]); @@ -2061,8 +2114,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - if (test_bit(RSCN_UPDATE, &save_flags)) + if (test_bit(RSCN_UPDATE, &save_flags)) { set_bit(RSCN_UPDATE, &vha->dpc_flags); + vha->flags.rscn_queue_overflow = 1; + } } return (rval); @@ -2110,7 +2165,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) goto cleanup_allocation; DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", - ha->host_no, entries)); + vha->host_no, entries)); DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, entries * sizeof(struct gid_list_info))); @@ -2243,7 +2298,8 @@ static void qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { #define LS_UNKNOWN 2 - static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; + static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; + char *link_speed; int rval; uint16_t mb[6]; struct qla_hw_data *ha = vha->hw; @@ -2266,10 +2322,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->port_name[6], fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1])); } else { + link_speed = link_speeds[LS_UNKNOWN]; + if (fcport->fp_speed < 5) + link_speed = link_speeds[fcport->fp_speed]; + else if (fcport->fp_speed == 0x13) + link_speed = link_speeds[5]; DEBUG2(qla_printk(KERN_INFO, ha, "iIDMA adjusted to %s GB/s on " 
"%02x%02x%02x%02x%02x%02x%02x%02x.\n", - link_speeds[fcport->fp_speed], fcport->port_name[0], + link_speed, fcport->port_name[0], fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], @@ -3180,9 +3241,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; uint32_t wait_time; - struct qla_hw_data *ha = vha->hw; - struct req_que *req = ha->req_q_map[vha->req_ques[0]]; - struct rsp_que *rsp = req->rsp; + struct req_que *req; + struct rsp_que *rsp; + + if (ql2xmultique_tag) + req = vha->hw->req_q_map[0]; + else + req = vha->req; + rsp = req->rsp; atomic_set(&vha->loop_state, LOOP_UPDATE); clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); @@ -3448,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) int ret = -1; int i; - for (i = 1; i < ha->max_queues; i++) { + for (i = 1; i < ha->max_rsp_queues; i++) { rsp = ha->rsp_q_map[i]; if (rsp) { rsp->options &= ~BIT_0; @@ -3462,6 +3528,8 @@ qla25xx_init_queues(struct qla_hw_data *ha) "%s Rsp que:%d inited\n", __func__, rsp->id)); } + } + for (i = 1; i < ha->max_req_queues; i++) { req = ha->req_q_map[i]; if (req) { /* Clear outstanding commands array. */ @@ -3566,14 +3634,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv = ha->nvram; /* Determine NVRAM starting address. */ - ha->nvram_size = sizeof(struct nvram_24xx); - ha->nvram_base = FA_NVRAM_FUNC0_ADDR; - ha->vpd_size = FA_NVRAM_VPD_SIZE; - ha->vpd_base = FA_NVRAM_VPD0_ADDR; - if (PCI_FUNC(ha->pdev->devfn)) { + if (ha->flags.port0) { + ha->nvram_base = FA_NVRAM_FUNC0_ADDR; + ha->vpd_base = FA_NVRAM_VPD0_ADDR; + } else { ha->nvram_base = FA_NVRAM_FUNC1_ADDR; ha->vpd_base = FA_NVRAM_VPD1_ADDR; } + ha->nvram_size = sizeof(struct nvram_24xx); + ha->vpd_size = FA_NVRAM_VPD_SIZE; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; @@ -3587,7 +3656,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) chksum += le32_to_cpu(*dptr++); - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); + DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); /* Bad NVRAM data, set defaults parameters. 
*/ @@ -3612,7 +3681,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv->exchange_count = __constant_cpu_to_le16(0); nv->hard_address = __constant_cpu_to_le16(124); nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); + nv->port_name[1] = 0x00 + ha->port_no; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; @@ -3798,11 +3867,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) } static int -qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) +qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, + uint32_t faddr) { int rval = QLA_SUCCESS; int segments, fragment; - uint32_t faddr; uint32_t *dcode, dlen; uint32_t risc_addr; uint32_t risc_size; @@ -3811,12 +3880,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) struct req_que *req = ha->req_q_map[0]; qla_printk(KERN_INFO, ha, - "FW: Loading from flash (%x)...\n", ha->flt_region_fw); + "FW: Loading from flash (%x)...\n", faddr); rval = QLA_SUCCESS; segments = FA_RISC_CODE_SEGMENTS; - faddr = ha->flt_region_fw; dcode = (uint32_t *)req->ring; *srisc_addr = 0; @@ -4104,6 +4172,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; + if (ql2xfwloadbin == 1) + return qla81xx_load_risc(vha, srisc_addr); + /* * FW Load priority: * 1) Firmware via request-firmware interface (.bin file). @@ -4113,24 +4184,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) if (rval == QLA_SUCCESS) return rval; - return qla24xx_load_risc_flash(vha, srisc_addr); + return qla24xx_load_risc_flash(vha, srisc_addr, + vha->hw->flt_region_fw); } int qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; + struct qla_hw_data *ha = vha->hw; + + if (ql2xfwloadbin == 2) + goto try_blob_fw; /* * FW Load priority: * 1) Firmware residing in flash. * 2) Firmware via request-firmware interface (.bin file). + * 3) Golden-Firmware residing in flash -- limited operation. 
*/ - rval = qla24xx_load_risc_flash(vha, srisc_addr); + rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); if (rval == QLA_SUCCESS) return rval; - return qla24xx_load_risc_blob(vha, srisc_addr); +try_blob_fw: + rval = qla24xx_load_risc_blob(vha, srisc_addr); + if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) + return rval; + + qla_printk(KERN_ERR, ha, + "FW: Attempting to fallback to golden firmware...\n"); + rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); + if (rval != QLA_SUCCESS) + return rval; + + qla_printk(KERN_ERR, ha, + "FW: Please update operational firmware...\n"); + ha->flags.running_gold_fw = 1; + + return rval; } void @@ -4146,7 +4238,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) ret = qla2x00_stop_firmware(vha); for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && - retries ; retries--) { + ret != QLA_INVALID_COMMAND && retries ; retries--) { ha->isp_ops->reset_chip(vha); if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) continue; @@ -4165,13 +4257,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - struct req_que *req = ha->req_q_map[vha->req_ques[0]]; - struct rsp_que *rsp = req->rsp; + struct req_que *req; + struct rsp_que *rsp; if (!vha->vp_idx) return -EINVAL; rval = qla2x00_fw_ready(base_vha); + if (ql2xmultique_tag) + req = ha->req_q_map[0]; + else + req = vha->req; + rsp = req->rsp; + if (rval == QLA_SUCCESS) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); @@ -4305,7 +4403,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) chksum += le32_to_cpu(*dptr++); - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); + DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); /* Bad NVRAM data, set defaults parameters. */ @@ -4329,7 +4427,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); nv->exchange_count = __constant_cpu_to_le16(0); nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); + nv->port_name[1] = 0x00 + ha->port_no; nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; @@ -4358,12 +4456,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->max_luns_per_target = __constant_cpu_to_le16(128); nv->port_down_retry_count = __constant_cpu_to_le16(30); nv->link_down_timeout = __constant_cpu_to_le16(30); - nv->enode_mac[0] = 0x01; + nv->enode_mac[0] = 0x00; nv->enode_mac[1] = 0x02; nv->enode_mac[2] = 0x03; nv->enode_mac[3] = 0x04; nv->enode_mac[4] = 0x05; - nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); + nv->enode_mac[5] = 0x06 + ha->port_no; rval = 1; } @@ -4396,7 +4494,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) icb->enode_mac[2] = 0x03; icb->enode_mac[3] = 0x04; icb->enode_mac[4] = 0x05; - icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); + icb->enode_mac[5] = 0x06 + ha->port_no; } /* Use extended-initialization control block. 
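
Untangling the gotos above, the ISP81xx load order is: the operational image in flash, then a .bin blob via the request-firmware interface, and only then the limited-operation golden image, with running_gold_fw flagged so the driver can nag for an update (ql2xfwloadbin skews this order). A condensed, illustrative restatement; the helpers are the driver's own, the straight-line flow is not, and it compiles only against the driver's headers:

	static int example_load_fw(scsi_qla_host_t *vha, uint32_t *srisc_addr)
	{
		struct qla_hw_data *ha = vha->hw;
		int rval;

		/* 1) Operational firmware in flash. */
		rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
		if (rval == QLA_SUCCESS)
			return rval;

		/* 2) Firmware blob via request_firmware(). */
		rval = qla24xx_load_risc_blob(vha, srisc_addr);
		if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
			return rval;

		/* 3) Last resort: limited-operation golden image. */
		rval = qla24xx_load_risc_flash(vha, srisc_addr,
		    ha->flt_region_gold_fw);
		if (rval == QLA_SUCCESS)
			ha->flags.running_gold_fw = 1;
		return rval;
	}
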
*/ diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index a8abbb95730d..13396beae2ce 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *, struct rsp_que *rsp); static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); +static void qla25xx_set_que(srb_t *, struct rsp_que **); /** * qla2x00_get_cmd_direction() - Determine control_flag data direction. * @cmd: SCSI command @@ -92,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds) * Returns a pointer to the Continuation Type 0 IOCB packet. */ static inline cont_entry_t * -qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) +qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) { cont_entry_t *cont_pkt; + struct req_que *req = vha->req; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { @@ -120,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) * Returns a pointer to the continuation type 1 IOCB packet. */ static inline cont_a64_entry_t * -qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) +qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha) { cont_a64_entry_t *cont_pkt; + struct req_que *req = vha->req; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { @@ -159,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *sg; int i; - struct req_que *req; cmd = sp->cmd; @@ -174,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, } vha = sp->fcport->vha; - req = sp->que; - cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Three DSDs are available in the Command Type 2 IOCB */ @@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, * Seven DSDs are available in the Continuation * Type 0 IOCB. */ - cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); + cont_pkt = qla2x00_prep_cont_type0_iocb(vha); cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; avail_dsds = 7; } @@ -220,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *sg; int i; - struct req_que *req; cmd = sp->cmd; @@ -235,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, } vha = sp->fcport->vha; - req = sp->que; - cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Two DSDs are available in the Command Type 3 IOCB */ @@ -254,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, * Five DSDs are available in the Continuation * Type 1 IOCB. 
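
Given the DSD budget spelled out in the comments above (two DSDs in the 64-bit command IOCB, five more per Continuation Type 1 IOCB), the number of request entries for an N-segment transfer follows directly. The stand-alone sketch below mirrors the arithmetic of the driver's qla2x00_calc_iocbs_64() helper named in the hunk header:

	#include <stdio.h>
	#include <stdint.h>

	/* 1 command IOCB holds 2 DSDs; each continuation IOCB holds 5. */
	static uint16_t calc_iocbs_64(uint16_t dsds)
	{
		uint16_t iocbs = 1;

		if (dsds > 2) {
			iocbs += (dsds - 2) / 5;
			if ((dsds - 2) % 5)
				iocbs++;
		}
		return iocbs;
	}

	int main(void)
	{
		uint16_t sg;

		for (sg = 1; sg <= 12; sg++)
			printf("%2u segments -> %u IOCBs\n", sg, calc_iocbs_64(sg));
		return 0;
	}
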
*/ - cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); + cont_pkt = qla2x00_prep_cont_type1_iocb(vha); cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; avail_dsds = 5; } @@ -353,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp) /* Build command packet */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; - sp->que = req; sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; @@ -453,6 +449,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, mrk24->lun[2] = MSB(lun); host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); mrk24->vp_index = vha->vp_idx; + mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16(lun); @@ -531,9 +528,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req, for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) *dword_ptr++ = 0; - /* Set system defined field. */ - pkt->sys_define = (uint8_t)req->ring_index; - /* Set entry count. */ pkt->entry_count = 1; @@ -656,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, } vha = sp->fcport->vha; - req = sp->que; + req = vha->req; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -687,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, * Five DSDs are available in the Continuation * Type 1 IOCB. */ - cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); + cont_pkt = qla2x00_prep_cont_type1_iocb(vha); cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; avail_dsds = 5; } @@ -724,19 +718,13 @@ qla24xx_start_scsi(srb_t *sp) struct scsi_cmnd *cmd = sp->cmd; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; - uint16_t que_id; /* Setup device pointers. */ ret = 0; - que_id = vha->req_ques[0]; - req = ha->req_q_map[que_id]; - sp->que = req; + qla25xx_set_que(sp, &rsp); + req = vha->req; - if (req->rsp) - rsp = req->rsp; - else - rsp = ha->rsp_q_map[que_id]; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; @@ -794,7 +782,7 @@ qla24xx_start_scsi(srb_t *sp) req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; - cmd_pkt->handle = handle; + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ @@ -823,6 +811,8 @@ qla24xx_start_scsi(srb_t *sp) /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; + /* Specify response queue number where completion should happen */ + cmd_pkt->entry_status = (uint8_t) rsp->id; wmb(); /* Adjust ring index. */ @@ -842,7 +832,7 @@ qla24xx_start_scsi(srb_t *sp) /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(rsp); + qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; @@ -855,3 +845,16 @@ queuing_error: return QLA_FUNCTION_FAILED; } + +static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) +{ + struct scsi_cmnd *cmd = sp->cmd; + struct qla_hw_data *ha = sp->fcport->vha->hw; + int affinity = cmd->request->cpu; + + if (ql2xmultique_tag && affinity >= 0 && + affinity < ha->max_rsp_queues - 1) + *rsp = ha->rsp_q_map[affinity + 1]; + else + *rsp = ha->rsp_q_map[0]; +} diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d04981848e56..c8d0a176fea4 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); -static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); +static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); -static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *); /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. @@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { hccr = RD_REG_WORD(&reg->hccr); if (hccr & HCCR_RISC_PAUSE) { @@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) { @@ -685,7 +684,7 @@ skip_rio: vha->host_no)); if (IS_FWI2_CAPABLE(ha)) - qla24xx_process_response_queue(rsp); + qla24xx_process_response_queue(vha, rsp); else qla2x00_process_response_queue(rsp); break; @@ -766,7 +765,10 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; - req = ha->req_q_map[vha->req_ques[0]]; + if (!ql2xqfulltracking) + return; + + req = vha->req; if (!req) return; if (req->max_q_depth <= sdev->queue_depth) @@ -808,6 +810,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req, fc_port_t *fcport; struct scsi_device *sdev; + if (!ql2xqfulltracking) + return; + sdev = sp->cmd->device; if (sdev->queue_depth >= req->max_q_depth) return; @@ -858,8 +863,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, qla2x00_ramp_up_queue_depth(vha, req, sp); qla2x00_sp_compl(ha, sp); } else { - DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", - vha->host_no)); + DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" + " handle(%d)\n", vha->host_no, req->id, index)); qla_printk(KERN_WARNING, ha, "Invalid ISP SCSI completion handle\n"); @@ -881,7 +886,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp) uint16_t handle_cnt; uint16_t cnt; - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); if (!vha->flags.online) return; @@ -926,7 +931,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp) } break; case STATUS_CONT_TYPE: - 
qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); + qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; default: /* Type Not Supported. */ @@ -945,7 +950,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp) } static inline void -qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) +qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len, + struct rsp_que *rsp) { struct scsi_cmnd *cp = sp->cmd; @@ -962,7 +968,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) sp->request_sense_ptr += sense_len; sp->request_sense_length -= sense_len; if (sp->request_sense_length != 0) - sp->fcport->vha->status_srb = sp; + rsp->status_srb = sp; DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, @@ -992,7 +998,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; uint8_t *rsp_info, *sense_data; struct qla_hw_data *ha = vha->hw; - struct req_que *req = rsp->req; + uint32_t handle; + uint16_t que; + struct req_que *req; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; @@ -1003,18 +1011,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) comp_status = le16_to_cpu(sts->comp_status); scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; } - + handle = (uint32_t) LSW(sts->handle); + que = MSW(sts->handle); + req = ha->req_q_map[que]; /* Fast path completion. */ if (comp_status == CS_COMPLETE && scsi_status == 0) { - qla2x00_process_completed_request(vha, req, sts->handle); + qla2x00_process_completed_request(vha, req, handle); return; } /* Validate handle. */ - if (sts->handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[sts->handle]; - req->outstanding_cmds[sts->handle] = NULL; + if (handle < MAX_OUTSTANDING_COMMANDS) { + sp = req->outstanding_cmds[handle]; + req->outstanding_cmds[handle] = NULL; } else sp = NULL; @@ -1030,7 +1040,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp = sp->cmd; if (cp == NULL) { DEBUG2(printk("scsi(%ld): Command already returned back to OS " - "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); + "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp)); qla_printk(KERN_WARNING, ha, "Command is NULL: already returned to OS (sp=%p)\n", sp); @@ -1121,6 +1131,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) scsi_status)); /* Adjust queue depth for all luns on the port. */ + if (!ql2xqfulltracking) + break; fcport->last_queue_full = jiffies; starget_for_each_device(cp->device->sdev_target, fcport, qla2x00_adjust_sdev_qdepth_down); @@ -1133,7 +1145,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (!(scsi_status & SS_SENSE_LEN_VALID)) break; - qla2x00_handle_sense(sp, sense_data, sense_len); + qla2x00_handle_sense(sp, sense_data, sense_len, rsp); break; case CS_DATA_UNDERRUN: @@ -1179,6 +1191,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) * Adjust queue depth for all luns on the * port. 
*/ + if (!ql2xqfulltracking) + break; fcport->last_queue_full = jiffies; starget_for_each_device( cp->device->sdev_target, fcport, @@ -1192,12 +1206,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (!(scsi_status & SS_SENSE_LEN_VALID)) break; - qla2x00_handle_sense(sp, sense_data, sense_len); + qla2x00_handle_sense(sp, sense_data, sense_len, rsp); } else { /* * If RISC reports underrun and target does not report * it then we must have a lost frame, so tell upper - * layer to retry it by reporting a bus busy. + * layer to retry it by reporting an error. */ if (!(scsi_status & SS_RESIDUAL_UNDER)) { DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " @@ -1207,7 +1221,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp->device->id, cp->device->lun, resid, scsi_bufflen(cp))); - cp->result = DID_BUS_BUSY << 16; + cp->result = DID_ERROR << 16; break; } @@ -1334,7 +1348,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) } /* Place command on done queue. */ - if (vha->status_srb == NULL) + if (rsp->status_srb == NULL) qla2x00_sp_compl(ha, sp); } @@ -1346,11 +1360,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) * Extended sense data. */ static void -qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) +qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) { uint8_t sense_sz = 0; - struct qla_hw_data *ha = vha->hw; - srb_t *sp = vha->status_srb; + struct qla_hw_data *ha = rsp->hw; + srb_t *sp = rsp->status_srb; struct scsi_cmnd *cp; if (sp != NULL && sp->request_sense_length != 0) { @@ -1362,7 +1376,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) "cmd is NULL: already returned to OS (sp=%p)\n", sp); - vha->status_srb = NULL; + rsp->status_srb = NULL; return; } @@ -1383,7 +1397,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) /* Place command on done queue. */ if (sp->request_sense_length == 0) { - vha->status_srb = NULL; + rsp->status_srb = NULL; qla2x00_sp_compl(ha, sp); } } @@ -1399,7 +1413,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) { srb_t *sp; struct qla_hw_data *ha = vha->hw; - struct req_que *req = rsp->req; + uint32_t handle = LSW(pkt->handle); + uint16_t que = MSW(pkt->handle); + struct req_que *req = ha->req_q_map[que]; #if defined(QL_DEBUG_LEVEL_2) if (pkt->entry_status & RF_INV_E_ORDER) qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); @@ -1417,14 +1433,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) #endif /* Validate handle. */ - if (pkt->handle < MAX_OUTSTANDING_COMMANDS) - sp = req->outstanding_cmds[pkt->handle]; + if (handle < MAX_OUTSTANDING_COMMANDS) + sp = req->outstanding_cmds[handle]; else sp = NULL; if (sp) { /* Free outstanding command slot. */ - req->outstanding_cmds[pkt->handle] = NULL; + req->outstanding_cmds[handle] = NULL; /* Bad payload or header */ if (pkt->entry_status & @@ -1486,13 +1502,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) * qla24xx_process_response_queue() - Process response queue entries. 
* @ha: SCSI driver HA context */ -void -qla24xx_process_response_queue(struct rsp_que *rsp) +void qla24xx_process_response_queue(struct scsi_qla_host *vha, + struct rsp_que *rsp) { struct sts_entry_24xx *pkt; - struct scsi_qla_host *vha; - - vha = qla2x00_get_rsp_host(rsp); if (!vha->flags.online) return; @@ -1523,7 +1536,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp) qla2x00_status_entry(vha, rsp, pkt); break; case STATUS_CONT_TYPE: - qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); + qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); break; case VP_RPT_ID_IOCB_TYPE: qla24xx_report_id_acquisition(vha, @@ -1626,7 +1639,7 @@ qla24xx_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { @@ -1664,7 +1677,7 @@ qla24xx_intr_handler(int irq, void *dev_id) break; case 0x13: case 0x14: - qla24xx_process_response_queue(rsp); + qla24xx_process_response_queue(vha, rsp); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " @@ -1692,6 +1705,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; + struct scsi_qla_host *vha; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1704,7 +1718,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) spin_lock_irq(&ha->hardware_lock); - qla24xx_process_response_queue(rsp); + vha = qla25xx_get_host(rsp); + qla24xx_process_response_queue(vha, rsp); WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); spin_unlock_irq(&ha->hardware_lock); @@ -1717,7 +1732,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; struct rsp_que *rsp; - struct device_reg_24xx __iomem *reg; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1726,13 +1740,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) return IRQ_NONE; } ha = rsp->hw; - reg = &ha->iobase->isp24; - spin_lock_irq(&ha->hardware_lock); - - qla24xx_process_response_queue(rsp); - - spin_unlock_irq(&ha->hardware_lock); + queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); return IRQ_HANDLED; } @@ -1760,7 +1769,7 @@ qla24xx_msix_default(int irq, void *dev_id) status = 0; spin_lock_irq(&ha->hardware_lock); - vha = qla2x00_get_rsp_host(rsp); + vha = pci_get_drvdata(ha->pdev); do { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { @@ -1798,7 +1807,7 @@ qla24xx_msix_default(int irq, void *dev_id) break; case 0x13: case 0x14: - qla24xx_process_response_queue(rsp); + qla24xx_process_response_queue(vha, rsp); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " @@ -1822,31 +1831,14 @@ qla24xx_msix_default(int irq, void *dev_id) /* Interrupt handling helpers. 
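
The qla25xx_msix_rsp_q() hunk above stops processing responses in hard-irq context: the handler now only queues the response queue's q_work on ha->wq, pinned to CPU rsp->id - 1, and the workqueue callback does the actual ring processing. A hedged, kernel-style sketch of that deferral pattern (structure and function names are illustrative; queue_work_on(), INIT_WORK() and container_of() are the real APIs):

	#include <linux/workqueue.h>
	#include <linux/interrupt.h>

	struct example_hw {
		struct workqueue_struct *wq;	/* allocated at probe time */
	};

	struct example_rsp {
		int id;				/* 1-based response queue id */
		struct example_hw *hw;
		struct work_struct q_work;	/* per-queue deferred work */
	};

	static void example_rsp_work(struct work_struct *work)
	{
		struct example_rsp *rsp =
		    container_of(work, struct example_rsp, q_work);

		/* process response-ring entries for rsp here */
		(void)rsp;
	}

	/* At setup: INIT_WORK(&rsp->q_work, example_rsp_work); */
	static irqreturn_t example_msix_handler(int irq, void *dev_id)
	{
		struct example_rsp *rsp = dev_id;

		/* defer to the CPU that owns this response queue */
		queue_work_on(rsp->id - 1, rsp->hw->wq, &rsp->q_work);
		return IRQ_HANDLED;
	}
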
*/ struct qla_init_msix_entry { - uint16_t entry; - uint16_t index; const char *name; irq_handler_t handler; }; -static struct qla_init_msix_entry base_queue = { - .entry = 0, - .index = 0, - .name = "qla2xxx (default)", - .handler = qla24xx_msix_default, -}; - -static struct qla_init_msix_entry base_rsp_queue = { - .entry = 1, - .index = 1, - .name = "qla2xxx (rsp_q)", - .handler = qla24xx_msix_rsp_q, -}; - -static struct qla_init_msix_entry multi_rsp_queue = { - .entry = 1, - .index = 1, - .name = "qla2xxx (multi_q)", - .handler = qla25xx_msix_rsp_q, +static struct qla_init_msix_entry msix_entries[3] = { + { "qla2xxx (default)", qla24xx_msix_default }, + { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, + { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, }; static void @@ -1873,7 +1865,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) int i, ret; struct msix_entry *entries; struct qla_msix_entry *qentry; - struct qla_init_msix_entry *msix_queue; entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, GFP_KERNEL); @@ -1900,7 +1891,7 @@ msix_failed: ha->msix_count, ret); goto msix_out; } - ha->max_queues = ha->msix_count - 1; + ha->max_rsp_queues = ha->msix_count - 1; } ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); @@ -1918,45 +1909,27 @@ msix_failed: qentry->rsp = NULL; } - /* Enable MSI-X for AENs for queue 0 */ - qentry = &ha->msix_entries[0]; - ret = request_irq(qentry->vector, base_queue.handler, 0, - base_queue.name, rsp); - if (ret) { - qla_printk(KERN_WARNING, ha, + /* Enable MSI-X vectors for the base queue */ + for (i = 0; i < 2; i++) { + qentry = &ha->msix_entries[i]; + ret = request_irq(qentry->vector, msix_entries[i].handler, + 0, msix_entries[i].name, rsp); + if (ret) { + qla_printk(KERN_WARNING, ha, "MSI-X: Unable to register handler -- %x/%d.\n", qentry->vector, ret); - qla24xx_disable_msix(ha); - goto msix_out; + qla24xx_disable_msix(ha); + ha->mqenable = 0; + goto msix_out; + } + qentry->have_irq = 1; + qentry->rsp = rsp; + rsp->msix = qentry; } - qentry->have_irq = 1; - qentry->rsp = rsp; /* Enable MSI-X vector for response queue update for queue 0 */ - if (ha->max_queues > 1 && ha->mqiobase) { + if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) ha->mqenable = 1; - msix_queue = &multi_rsp_queue; - qla_printk(KERN_INFO, ha, - "MQ enabled, Number of Queue Resources: %d \n", - ha->max_queues); - } else { - ha->mqenable = 0; - msix_queue = &base_rsp_queue; - } - - qentry = &ha->msix_entries[1]; - ret = request_irq(qentry->vector, msix_queue->handler, 0, - msix_queue->name, rsp); - if (ret) { - qla_printk(KERN_WARNING, ha, - "MSI-X: Unable to register handler -- %x/%d.\n", - qentry->vector, ret); - qla24xx_disable_msix(ha); - ha->mqenable = 0; - goto msix_out; - } - qentry->have_irq = 1; - qentry->rsp = rsp; msix_out: kfree(entries); @@ -2063,35 +2036,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) } } -static struct scsi_qla_host * -qla2x00_get_rsp_host(struct rsp_que *rsp) -{ - srb_t *sp; - struct qla_hw_data *ha = rsp->hw; - struct scsi_qla_host *vha = NULL; - struct sts_entry_24xx *pkt; - struct req_que *req; - - if (rsp->id) { - pkt = (struct sts_entry_24xx *) rsp->ring_ptr; - req = rsp->req; - if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[pkt->handle]; - if (sp) - vha = sp->fcport->vha; - } - } - if (!vha) - /* handle it in base queue */ - vha = pci_get_drvdata(ha->pdev); - - return vha; -} int qla25xx_request_irq(struct rsp_que *rsp) { struct qla_hw_data *ha = 
rsp->hw; - struct qla_init_msix_entry *intr = &multi_rsp_queue; + struct qla_init_msix_entry *intr = &msix_entries[2]; struct qla_msix_entry *msix = rsp->msix; int ret; @@ -2106,3 +2055,30 @@ int qla25xx_request_irq(struct rsp_que *rsp) msix->rsp = rsp; return ret; } + +struct scsi_qla_host * +qla25xx_get_host(struct rsp_que *rsp) +{ + srb_t *sp; + struct qla_hw_data *ha = rsp->hw; + struct scsi_qla_host *vha = NULL; + struct sts_entry_24xx *pkt; + struct req_que *req; + uint16_t que; + uint32_t handle; + + pkt = (struct sts_entry_24xx *) rsp->ring_ptr; + que = MSW(pkt->handle); + handle = (uint32_t) LSW(pkt->handle); + req = ha->req_q_map[que]; + if (handle < MAX_OUTSTANDING_COMMANDS) { + sp = req->outstanding_cmds[handle]; + if (sp) + return sp->fcport->vha; + else + goto base_que; + } +base_que: + vha = pci_get_drvdata(ha->pdev); + return vha; +} diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index e67c1660bf46..451ece0760b0 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) * Context: * Kernel context. */ -void +int qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, uint32_t *mpi_caps, uint8_t *phy) @@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) + goto failed; /* Return mailbox data. */ *major = mcp->mb[1]; @@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, phy[1] = mcp->mb[9] >> 8; phy[2] = mcp->mb[9] & 0xff; } - +failed: if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, @@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, /*EMPTY*/ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } + return rval; } /* @@ -748,20 +751,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, * Kernel context. 
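 * Note that the reworked abort prototype below takes only the srb; the
 * port, host and request queue are all recovered from it (sp->fcport,
 * fcport->vha, vha->req), so callers no longer pass vha or req
 * explicitly.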
*/ int -qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) +qla2x00_abort_command(srb_t *sp) { unsigned long flags = 0; - fc_port_t *fcport; int rval; uint32_t handle = 0; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + fc_port_t *fcport = sp->fcport; + scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; + struct req_que *req = vha->req; DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); - fcport = sp->fcport; - spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { if (req->outstanding_cmds[handle] == sp) @@ -800,7 +803,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) } int -qla2x00_abort_target(struct fc_port *fcport, unsigned int l) +qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) { int rval, rval2; mbx_cmd_t mc; @@ -813,8 +816,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l) l = l; vha = fcport->vha; - req = vha->hw->req_q_map[0]; - rsp = vha->hw->rsp_q_map[0]; + req = vha->hw->req_q_map[tag]; + rsp = vha->hw->rsp_q_map[tag]; mcp->mb[0] = MBC_ABORT_TARGET; mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { @@ -850,7 +853,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l) } int -qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) +qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) { int rval, rval2; mbx_cmd_t mc; @@ -862,8 +865,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); vha = fcport->vha; - req = vha->hw->req_q_map[0]; - rsp = vha->hw->rsp_q_map[0]; + req = vha->hw->req_q_map[tag]; + rsp = vha->hw->rsp_q_map[tag]; mcp->mb[0] = MBC_LUN_RESET; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) @@ -931,6 +934,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_0; mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + if (IS_QLA81XX(vha->hw)) + mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -952,9 +957,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", vha->host_no, rval)); } else { - /*EMPTY*/ DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", vha->host_no)); + + if (IS_QLA81XX(vha->hw)) { + vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; + vha->fcoe_fcf_idx = mcp->mb[10]; + vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; + vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; + vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; + vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; + vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; + vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; + } } return rval; @@ -1252,7 +1267,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) mcp->mb[0] = MBC_GET_FIRMWARE_STATE; mcp->out_mb = MBX_0; - mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -1261,6 +1276,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) states[0] = mcp->mb[1]; states[1] = mcp->mb[2]; states[2] = mcp->mb[3]; + states[3] = mcp->mb[4]; + states[4] = mcp->mb[5]; if (rval != QLA_SUCCESS) { /*EMPTY*/ @@ -1480,9 +1497,17 @@ 
qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, dma_addr_t lg_dma; uint32_t iop[2]; struct qla_hw_data *ha = vha->hw; + struct req_que *req; + struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + if (ql2xmultique_tag) + req = ha->req_q_map[0]; + else + req = vha->req; + rsp = req->rsp; + lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", @@ -1493,6 +1518,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; + lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); if (opt & BIT_0) @@ -1741,6 +1767,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, struct logio_entry_24xx *lg; dma_addr_t lg_dma; struct qla_hw_data *ha = vha->hw; + struct req_que *req; + struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); @@ -1752,8 +1780,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, } memset(lg, 0, sizeof(struct logio_entry_24xx)); + if (ql2xmaxqueues > 1) + req = ha->req_q_map[0]; + else + req = vha->req; + rsp = req->rsp; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; + lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); @@ -1864,9 +1898,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (IS_QLA81XX(vha->hw)) - return QLA_SUCCESS; - DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", vha->host_no)); @@ -2195,21 +2226,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, } int -qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) +qla24xx_abort_command(srb_t *sp) { int rval; - fc_port_t *fcport; unsigned long flags = 0; struct abort_entry_24xx *abt; dma_addr_t abt_dma; uint32_t handle; + fc_port_t *fcport = sp->fcport; + struct scsi_qla_host *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; + struct req_que *req = vha->req; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - fcport = sp->fcport; - spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { if (req->outstanding_cmds[handle] == sp) @@ -2231,6 +2262,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) abt->entry_type = ABORT_IOCB_TYPE; abt->entry_count = 1; + abt->handle = MAKE_HANDLE(req->id, abt->handle); abt->nport_handle = cpu_to_le16(fcport->loop_id); abt->handle_to_abort = handle; abt->port_id[0] = fcport->d_id.b.al_pa; @@ -2272,7 +2304,7 @@ struct tsk_mgmt_cmd { static int __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, - unsigned int l) + unsigned int l, int tag) { int rval, rval2; struct tsk_mgmt_cmd *tsk; @@ -2286,8 +2318,11 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, vha = fcport->vha; ha = vha->hw; - req = ha->req_q_map[0]; - rsp = ha->rsp_q_map[0]; + req = vha->req; + if (ql2xmultique_tag) + rsp = ha->rsp_q_map[tag + 1]; + else + rsp = req->rsp; tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); if (tsk == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " @@ -2298,6 +2333,7 @@ __qla24xx_issue_tmf(char 
*name, uint32_t type, struct fc_port *fcport, tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; tsk->p.tsk.entry_count = 1; + tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle); tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); tsk->p.tsk.control_flags = cpu_to_le32(type); @@ -2344,15 +2380,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, } int -qla24xx_abort_target(struct fc_port *fcport, unsigned int l) +qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag) { - return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); + return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); } int -qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) +qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag) { - return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); + return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); } int @@ -2446,6 +2482,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); + if (mcp->mb[0] == MBS_INVALID_COMMAND) + rval = QLA_INVALID_COMMAND; } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } @@ -2717,8 +2755,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (vp_idx == 0) return; - if (MSB(stat) == 1) + if (MSB(stat) == 1) { + DEBUG2(printk("scsi(%ld): Could not acquire ID for " + "VP[%d].\n", vha->host_no, vp_idx)); return; + } list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) if (vp_idx == vp->vp_idx) @@ -3141,6 +3182,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) WRT_REG_DWORD(&reg->req_q_in, 0); WRT_REG_DWORD(&reg->req_q_out, 0); } + req->req_q_in = &reg->req_q_in; + req->req_q_out = &reg->req_q_out; spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); @@ -3167,7 +3210,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[6] = MSW(MSD(rsp->dma)); mcp->mb[7] = LSW(MSD(rsp->dma)); mcp->mb[5] = rsp->length; - mcp->mb[11] = rsp->vp_idx; mcp->mb[14] = rsp->msix->entry; mcp->mb[13] = rsp->rid; @@ -3179,7 +3221,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[8] = 0; /* que out ptr index */ mcp->mb[9] = 0; - mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 + mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->flags = MBX_DMA_OUT; @@ -3384,7 +3426,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, vha->host_no, rval, mcp->mb[0])); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } return rval; @@ -3428,3 +3470,141 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, return rval; } + +int +qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, + uint16_t size_in_bytes, uint16_t *actual_size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA81XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_GET_XGMAC_STATS; + mcp->mb[2] = MSW(stats_dma); + mcp->mb[3] = LSW(stats_dma); + mcp->mb[6] = MSW(MSD(stats_dma)); + mcp->mb[7] = LSW(MSD(stats_dma)); + mcp->mb[8] = size_in_bytes >> 2; + mcp->out_mb =
MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " + "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, + mcp->mb[0], mcp->mb[1], mcp->mb[2])); + } else { + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + + *actual_size = mcp->mb[2] << 2; + } + + return rval; +} + +int +qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, + uint16_t size) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA81XX(vha->hw)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_GET_DCBX_PARAMS; + mcp->mb[1] = 0; + mcp->mb[2] = MSW(tlv_dma); + mcp->mb[3] = LSW(tlv_dma); + mcp->mb[6] = MSW(MSD(tlv_dma)); + mcp->mb[7] = LSW(MSD(tlv_dma)); + mcp->mb[8] = size; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " + "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, + mcp->mb[0], mcp->mb[1], mcp->mb[2])); + } else { + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + } + + return rval; +} + +int +qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_READ_RAM_EXTENDED; + mcp->mb[1] = LSW(risc_addr); + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_0; + mcp->tov = 30; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, + vha->host_no, rval, mcp->mb[0])); + } else { + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + *data = mcp->mb[3] << 16 | mcp->mb[2]; + } + + return rval; +} + +int +qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return QLA_FUNCTION_FAILED; + + DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + + mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; + mcp->mb[1] = LSW(risc_addr); + mcp->mb[2] = LSW(data); + mcp->mb[3] = MSW(data); + mcp->mb[8] = MSW(risc_addr); + mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = 30; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, + vha->host_no, rval, mcp->mb[0])); + } else { + DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 51716c7e3008..650bcef08f2a 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); - memset(vha->req_ques, 0, sizeof(vha->req_ques)); - vha->req_ques[0] = ha->req_q_map[0]->id; - host->can_queue = ha->req_q_map[0]->length + 128; + vha->req = base_vha->req; + host->can_queue = 
base_vha->req->length + 128; host->this_id = 255; host->cmd_per_lun = 3; host->max_cmd_len = MAX_CMDSZ; @@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos) /* Delete all queues for a given vhost */ int -qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) +qla25xx_delete_queues(struct scsi_qla_host *vha) { int cnt, ret = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct qla_hw_data *ha = vha->hw; - if (que_no) { - /* Delete request queue */ - req = ha->req_q_map[que_no]; + /* Delete request queues */ + for (cnt = 1; cnt < ha->max_req_queues; cnt++) { + req = ha->req_q_map[cnt]; if (req) { - rsp = req->rsp; ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { qla_printk(KERN_WARNING, ha, - "Couldn't delete req que %d\n", req->id); + "Couldn't delete req que %d\n", + req->id); return ret; } - /* Delete associated response queue */ - if (rsp) { - ret = qla25xx_delete_rsp_que(vha, rsp); - if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete rsp que %d\n", - rsp->id); - return ret; - } - } } - } else { /* delete all queues of this host */ - for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { - /* Delete request queues */ - req = ha->req_q_map[vha->req_ques[cnt]]; - if (req && req->id) { - rsp = req->rsp; - ret = qla25xx_delete_req_que(vha, req); - if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete req que %d\n", - vha->req_ques[cnt]); - return ret; - } - vha->req_ques[cnt] = ha->req_q_map[0]->id; - /* Delete associated response queue */ - if (rsp && rsp->id) { - ret = qla25xx_delete_rsp_que(vha, rsp); - if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete rsp que %d\n", - rsp->id); - return ret; - } - } + } + + /* Delete response queues */ + for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { + rsp = ha->rsp_q_map[cnt]; + if (rsp) { + ret = qla25xx_delete_rsp_que(vha, rsp); + if (ret != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Couldn't delete rsp que %d\n", + rsp->id); + return ret; } } } - qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n", - vha->vp_idx); return ret; } int qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) + uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos) { int ret = 0; struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t __iomem *reg; + uint32_t cnt; req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (req == NULL) { @@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, } mutex_lock(&ha->vport_lock); - que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); - if (que_id >= ha->max_queues) { + que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); + if (que_id >= ha->max_req_queues) { mutex_unlock(&ha->vport_lock); qla_printk(KERN_INFO, ha, "No resources to create " "additional request queue\n"); @@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->vp_idx = vp_idx; req->qos = qos; - if (ha->rsp_q_map[rsp_que]) { + if (rsp_que < 0) + req->rsp = NULL; + else req->rsp = ha->rsp_q_map[rsp_que]; - req->rsp->req = req; - } /* Use alternate PCI bus number */ if (MSB(req->rid)) options |= BIT_4; @@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, if (LSB(req->rid)) options |= BIT_5; req->options = options; + + for (cnt = 1; cnt < 
MAX_OUTSTANDING_COMMANDS; cnt++) + req->outstanding_cmds[cnt] = NULL; + req->current_outstanding_cmd = 1; + req->ring_ptr = req->ring; req->ring_index = 0; req->cnt = req->length; req->id = que_id; reg = ISP_QUE_REG(ha, que_id); - req->req_q_in = &reg->isp25mq.req_q_in; - req->req_q_out = &reg->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; mutex_unlock(&ha->vport_lock); @@ -654,10 +633,19 @@ que_failed: return 0; } +static void qla_do_work(struct work_struct *work) +{ + struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); + struct scsi_qla_host *vha; + + vha = qla25xx_get_host(rsp); + qla24xx_process_response_queue(vha, rsp); +} + /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid) + uint8_t vp_idx, uint16_t rid, int req) { int ret = 0; struct rsp_que *rsp = NULL; @@ -672,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, goto que_failed; } - rsp->length = RESPONSE_ENTRY_CNT_2300; + rsp->length = RESPONSE_ENTRY_CNT_MQ; rsp->ring = dma_alloc_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), &rsp->dma, GFP_KERNEL); @@ -683,8 +671,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, } mutex_lock(&ha->vport_lock); - que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); - if (que_id >= ha->max_queues) { + que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); + if (que_id >= ha->max_rsp_queues) { mutex_unlock(&ha->vport_lock); qla_printk(KERN_INFO, ha, "No resources to create " "additional response queue\n"); @@ -708,8 +696,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, if (LSB(rsp->rid)) options |= BIT_5; rsp->options = options; - rsp->ring_ptr = rsp->ring; - rsp->ring_index = 0; rsp->id = que_id; reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; @@ -728,9 +714,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, mutex_unlock(&ha->vport_lock); goto que_failed; } + if (req >= 0) + rsp->req = ha->req_q_map[req]; + else + rsp->req = NULL; qla2x00_init_response_q_entries(rsp); - + if (rsp->hw->wq) + INIT_WORK(&rsp->q_work, qla_do_work); return rsp->id; que_failed: @@ -744,14 +735,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos) uint16_t options = 0; uint8_t ret = 0; struct qla_hw_data *ha = vha->hw; + struct rsp_que *rsp; options |= BIT_1; - ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); + ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1); if (!ret) { qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); return ret; } else qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); + rsp = ha->rsp_q_map[ret]; options = 0; if (qos & BIT_7) @@ -759,10 +752,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos) ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, qos & ~BIT_7); if (ret) { - vha->req_ques[0] = ret; + vha->req = ha->req_q_map[ret]; qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); } else qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); + rsp->req = ha->req_q_map[ret]; return ret; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e4fdcdad80d0..dcf011679c8b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to report for target devices."); +int ql2xqfulltracking = 1;
+module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xqfulltracking, + "Controls whether the driver tracks queue full status " + "returns and dynamically adjusts a scsi device's queue " + "depth. Default is 1, perform tracking. Set to 0 to " + "disable dynamic tracking and adjustment of queue depth."); + int ql2xqfullrampup = 120; module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xqfullrampup, @@ -96,6 +104,23 @@ MODULE_PARM_DESC(ql2xmaxqueues, "Enables MQ settings " "Default is 1 for single queue. Set it to number \ of queues in MQ mode."); + +int ql2xmultique_tag; +module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR); +MODULE_PARM_DESC(ql2xmultique_tag, + "Enables CPU affinity settings for the driver " + "Default is 0 for no affinity of request and response IO. " + "Set it to 1 to turn on the cpu affinity."); + +int ql2xfwloadbin; +module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR); +MODULE_PARM_DESC(ql2xfwloadbin, + "Option to specify location from which to load ISP firmware:\n" + " 2 -- load firmware via the request_firmware() (hotplug)\n" + " interface.\n" + " 1 -- load firmware from flash.\n" + " 0 -- use default semantics.\n"); + /* * SCSI host template entry points */ @@ -187,7 +212,7 @@ static void qla2x00_sp_free_dma(srb_t *); /* -------------------------------------------------------------------------- */ static int qla2x00_alloc_queues(struct qla_hw_data *ha) { - ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, + ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, GFP_KERNEL); if (!ha->req_q_map) { qla_printk(KERN_WARNING, ha, @@ -195,7 +220,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha) goto fail_req_map; } - ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, + ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, GFP_KERNEL); if (!ha->rsp_q_map) { qla_printk(KERN_WARNING, ha, @@ -213,16 +238,8 @@ fail_req_map: return -ENOMEM; } -static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, - struct rsp_que *rsp) +static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) { - if (rsp && rsp->ring) - dma_free_coherent(&ha->pdev->dev, - (rsp->length + 1) * sizeof(response_t), - rsp->ring, rsp->dma); - - kfree(rsp); - rsp = NULL; if (req && req->ring) dma_free_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), @@ -232,22 +249,77 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, req = NULL; } +static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) +{ + if (rsp && rsp->ring) + dma_free_coherent(&ha->pdev->dev, + (rsp->length + 1) * sizeof(response_t), + rsp->ring, rsp->dma); + + kfree(rsp); + rsp = NULL; +} + static void qla2x00_free_queues(struct qla_hw_data *ha) { struct req_que *req; struct rsp_que *rsp; int cnt; - for (cnt = 0; cnt < ha->max_queues; cnt++) { - rsp = ha->rsp_q_map[cnt]; + for (cnt = 0; cnt < ha->max_req_queues; cnt++) { req = ha->req_q_map[cnt]; - qla2x00_free_que(ha, req, rsp); + qla2x00_free_req_que(ha, req); + } + kfree(ha->req_q_map); + ha->req_q_map = NULL; + + for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { + rsp = ha->rsp_q_map[cnt]; + qla2x00_free_rsp_que(ha, rsp); } kfree(ha->rsp_q_map); ha->rsp_q_map = NULL; +} - kfree(ha->req_q_map); - ha->req_q_map = NULL; +static int qla25xx_setup_mode(struct scsi_qla_host *vha) +{ + uint16_t options = 0; + int ques, req, ret; + struct qla_hw_data *ha = vha->hw; + + if (ql2xmultique_tag) { + /* 
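 * A sketch of what this branch sets up, assuming ql2xmultique_tag is set:
 * one dedicated request queue for I/O (the BIT_7 option) plus one
 * response queue per remaining MSI-X vector; each response queue's
 * interrupt only calls queue_work_on(rsp->id - 1, ha->wq, &rsp->q_work),
 * so completion processing for a given queue stays on one CPU.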
CPU affinity mode */ + ha->wq = create_workqueue("qla2xxx_wq"); + /* create a request queue for IO */ + options |= BIT_7; + req = qla25xx_create_req_que(ha, options, 0, 0, -1, + QLA_DEFAULT_QUE_QOS); + if (!req) { + qla_printk(KERN_WARNING, ha, + "Can't create request queue\n"); + goto fail; + } + vha->req = ha->req_q_map[req]; + options |= BIT_1; + for (ques = 1; ques < ha->max_rsp_queues; ques++) { + ret = qla25xx_create_rsp_que(ha, options, 0, 0, req); + if (!ret) { + qla_printk(KERN_WARNING, ha, + "Response Queue create failed\n"); + goto fail2; + } + } + DEBUG2(qla_printk(KERN_INFO, ha, + "CPU affinity mode enabled, no. of response" + " queues:%d, no. of request queues:%d\n", + ha->max_rsp_queues, ha->max_req_queues)); + } + return 0; +fail2: + qla25xx_delete_queues(vha); +fail: + ha->mqenable = 0; + return 1; } static char * @@ -387,7 +459,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, sp->fcport = fcport; sp->cmd = cmd; - sp->que = ha->req_q_map[0]; sp->flags = 0; CMD_SP(cmd) = (void *)sp; cmd->scsi_done = done; @@ -612,7 +683,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha) void qla2x00_abort_fcport_cmds(fc_port_t *fcport) { - int cnt, que, id; + int cnt; unsigned long flags; srb_t *sp; scsi_qla_host_t *vha = fcport->vha; @@ -620,32 +691,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport) struct req_que *req; spin_lock_irqsave(&ha->hardware_lock, flags); - for (que = 0; que < QLA_MAX_HOST_QUES; que++) { - id = vha->req_ques[que]; - req = ha->req_q_map[id]; - if (!req) + req = vha->req; + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (!sp) + continue; + if (sp->fcport != fcport) continue; - for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { - sp = req->outstanding_cmds[cnt]; - if (!sp) - continue; - if (sp->fcport != fcport) - continue; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(vha, sp, req)) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (ha->isp_ops->abort_command(sp)) { + DEBUG2(qla_printk(KERN_WARNING, ha, + "Abort failed -- %lx\n", + sp->cmd->serial_number)); + } else { + if (qla2x00_eh_wait_on_command(sp->cmd) != + QLA_SUCCESS) DEBUG2(qla_printk(KERN_WARNING, ha, - "Abort failed -- %lx\n", + "Abort failed while waiting -- %lx\n", sp->cmd->serial_number)); - } else { - if (qla2x00_eh_wait_on_command(sp->cmd) != - QLA_SUCCESS) - DEBUG2(qla_printk(KERN_WARNING, ha, - "Abort failed while waiting -- %lx\n", - sp->cmd->serial_number)); - } - spin_lock_irqsave(&ha->hardware_lock, flags); } + spin_lock_irqsave(&ha->hardware_lock, flags); } spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -693,7 +759,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) unsigned long flags; int wait = 0; struct qla_hw_data *ha = vha->hw; - struct req_que *req; + struct req_que *req = vha->req; srb_t *spt; qla2x00_block_error_handler(cmd); @@ -709,7 +775,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) spt = (srb_t *) CMD_SP(cmd); if (!spt) return SUCCESS; - req = spt->que; /* Check active list for command.
*/ spin_lock_irqsave(&ha->hardware_lock, flags); @@ -726,7 +791,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) " pid=%ld.\n", __func__, vha->host_no, sp, serial)); spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(vha, sp, req)) { + if (ha->isp_ops->abort_command(sp)) { DEBUG2(printk("%s(%ld): abort_command " "mbx failed.\n", __func__, vha->host_no)); ret = FAILED; @@ -777,7 +842,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, return status; spin_lock_irqsave(&ha->hardware_lock, flags); - req = sp->que; + req = vha->req; for (cnt = 1; status == QLA_SUCCESS && cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { sp = req->outstanding_cmds[cnt]; @@ -820,7 +885,7 @@ static char *reset_errors[] = { static int __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, - struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int)) + struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int)) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; @@ -841,7 +906,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) goto eh_reset_failed; err = 2; - if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS) + if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1) + != QLA_SUCCESS) goto eh_reset_failed; err = 3; if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, @@ -996,6 +1062,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) if (qla2x00_vp_abort_isp(vha)) goto eh_host_reset_lock; } else { + if (ha->wq) + flush_workqueue(ha->wq); + set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); if (qla2x00_abort_isp(base_vha)) { clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); @@ -1037,7 +1106,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) struct fc_port *fcport; struct qla_hw_data *ha = vha->hw; - if (ha->flags.enable_lip_full_login && !vha->vp_idx) { + if (ha->flags.enable_lip_full_login && !vha->vp_idx && + !IS_QLA81XX(ha)) { ret = qla2x00_full_login_lip(vha); if (ret != QLA_SUCCESS) { DEBUG2_3(printk("%s(%ld): failed: " @@ -1064,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) if (fcport->port_type != FCT_TARGET) continue; - ret = ha->isp_ops->target_reset(fcport, 0); + ret = ha->isp_ops->target_reset(fcport, 0, 0); if (ret != QLA_SUCCESS) { DEBUG2_3(printk("%s(%ld): bus_reset failed: " "target_reset=%d d_id=%x.\n", __func__, @@ -1088,7 +1158,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) struct req_que *req; spin_lock_irqsave(&ha->hardware_lock, flags); - for (que = 0; que < ha->max_queues; que++) { + for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; @@ -1123,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev) scsi_qla_host_t *vha = shost_priv(sdev->host); struct qla_hw_data *ha = vha->hw; struct fc_rport *rport = starget_to_rport(sdev->sdev_target); - struct req_que *req = ha->req_q_map[vha->req_ques[0]]; + struct req_que *req = vha->req; if (sdev->tagged_supported) scsi_activate_tcq(sdev, req->max_q_depth); @@ -1511,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; } + + /* Get adapter physical port no from interrupt pin register. 
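 * (Presumably pin 1/INTA vs. pin 2/INTB on these dual-function adapters;
 * an odd pin value marks physical port 0 via ha->flags.port0, which the
 * flash-layout code uses to choose the per-port NVRAM/VPD/NPIV regions.)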
*/ + pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); + if (ha->port_no & 1) + ha->flags.port0 = 1; + else + ha->flags.port0 = 0; } static int @@ -1518,6 +1595,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha) { resource_size_t pio; uint16_t msix; + int cpus; if (pci_request_selected_regions(ha->pdev, ha->bars, QLA2XXX_DRIVER_NAME)) { @@ -1571,8 +1649,9 @@ skip_pio: } /* Determine queue resources */ - ha->max_queues = 1; - if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) + ha->max_req_queues = ha->max_rsp_queues = 1; + if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) && + (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3), pci_resource_len(ha->pdev, 3)); @@ -1582,18 +1661,24 @@ skip_pio: ha->msix_count = msix; /* Max queues are bounded by available msix vectors */ /* queue 0 uses two msix vectors */ - if (ha->msix_count - 1 < ql2xmaxqueues) - ha->max_queues = ha->msix_count - 1; - else if (ql2xmaxqueues > QLA_MQ_SIZE) - ha->max_queues = QLA_MQ_SIZE; - else - ha->max_queues = ql2xmaxqueues; + if (ql2xmultique_tag) { + cpus = num_online_cpus(); + ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ? + (cpus + 1) : (ha->msix_count - 1); + ha->max_req_queues = 2; + } else if (ql2xmaxqueues > 1) { + ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ? + QLA_MQ_SIZE : ql2xmaxqueues; + DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no" + " of request queues:%d\n", ha->max_req_queues)); + } qla_printk(KERN_INFO, ha, "MSI-X vector count: %d\n", msix); - } + } else + qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n"); mqiobase_exit: - ha->msix_count = ha->max_queues + 1; + ha->msix_count = ha->max_rsp_queues + 1; return (0); iospace_error_exit: @@ -1605,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); + if (vha->hw->flags.running_gold_fw) + return; + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(RSCN_UPDATE, &vha->dpc_flags); @@ -1768,6 +1856,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_81XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla81xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; @@ -1803,14 +1892,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ret = -ENOMEM; qla2x00_mem_free(ha); - qla2x00_free_que(ha, req, rsp); + qla2x00_free_req_que(ha, req); + qla2x00_free_rsp_que(ha, rsp); goto probe_hw_failed; } pci_set_drvdata(pdev, base_vha); host = base_vha->host; - base_vha->req_ques[0] = req->id; + base_vha->req = req; host->can_queue = req->length + 128; if (IS_QLA2XXX_MIDTYPE(ha)) base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx; @@ -1841,7 +1931,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) } ha->rsp_q_map[0] = rsp; ha->req_q_map[0] = req; - + rsp->req = req; + req->rsp = rsp; + set_bit(0, ha->req_qid_map); + set_bit(0, ha->rsp_qid_map); /* FWI2-capable only. 
*/ req->req_q_in = &ha->iobase->isp24.req_q_in; req->req_q_out = &ha->iobase->isp24.req_q_out; @@ -1866,6 +1959,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto probe_failed; } + if (ha->mqenable) + if (qla25xx_setup_mode(base_vha)) + qla_printk(KERN_WARNING, ha, + "Can't create queues, falling back to single" + " queue mode\n"); + + if (ha->flags.running_gold_fw) + goto skip_dpc; + /* * Startup the kernel thread for this host adapter */ @@ -1878,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto probe_failed; } +skip_dpc: list_add_tail(&base_vha->list, &ha->vp_list); base_vha->host->irq = ha->pdev->irq; @@ -1917,8 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; probe_init_failed: - qla2x00_free_que(ha, req, rsp); - ha->max_queues = 0; + qla2x00_free_req_que(ha, req); + qla2x00_free_rsp_que(ha, rsp); + ha->max_req_queues = ha->max_rsp_queues = 0; probe_failed: if (base_vha->timer_active) @@ -1976,6 +2080,13 @@ qla2x00_remove_one(struct pci_dev *pdev) base_vha->flags.online = 0; + /* Flush the work queue and remove it */ + if (ha->wq) { + flush_workqueue(ha->wq); + destroy_workqueue(ha->wq); + ha->wq = NULL; + } + /* Kill the kernel thread for this host */ if (ha->dpc_thread) { struct task_struct *t = ha->dpc_thread; @@ -2017,6 +2128,8 @@ qla2x00_free_device(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + qla25xx_delete_queues(vha); + if (ha->flags.fce_enabled) qla2x00_disable_fce_trace(vha, NULL, NULL); @@ -2329,6 +2442,14 @@ qla2x00_mem_free(struct qla_hw_data *ha) vfree(ha->fw_dump); } + if (ha->dcbx_tlv) + dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, + ha->dcbx_tlv, ha->dcbx_tlv_dma); + + if (ha->xgmac_data) + dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, + ha->xgmac_data, ha->xgmac_data_dma); + if (ha->sns_cmd) dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), ha->sns_cmd, ha->sns_cmd_dma); @@ -2412,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->work_list); INIT_LIST_HEAD(&vha->list); + spin_lock_init(&vha->work_lock); + sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); return vha; @@ -2420,13 +2543,11 @@ fail: } static struct qla_work_evt * -qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, - int locked) +qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) { struct qla_work_evt *e; - e = kzalloc(sizeof(struct qla_work_evt), locked ? 
GFP_ATOMIC: - GFP_KERNEL); + e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC); if (!e) return NULL; @@ -2437,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type, } static int -qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked) +qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) { - unsigned long uninitialized_var(flags); - struct qla_hw_data *ha = vha->hw; + unsigned long flags; - if (!locked) - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(&vha->work_lock, flags); list_add_tail(&e->list, &vha->work_list); + spin_unlock_irqrestore(&vha->work_lock, flags); qla2xxx_wake_dpc(vha); - if (!locked) - spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_SUCCESS; } @@ -2457,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code, { struct qla_work_evt *e; - e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1); + e = qla2x00_alloc_work(vha, QLA_EVT_AEN); if (!e) return QLA_FUNCTION_FAILED; e->u.aen.code = code; e->u.aen.data = data; - return qla2x00_post_work(vha, e, 1); + return qla2x00_post_work(vha, e); } int @@ -2471,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb) { struct qla_work_evt *e; - e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1); + e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK); if (!e) return QLA_FUNCTION_FAILED; memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); - return qla2x00_post_work(vha, e, 1); + return qla2x00_post_work(vha, e); } static void qla2x00_do_work(struct scsi_qla_host *vha) { - struct qla_work_evt *e; - struct qla_hw_data *ha = vha->hw; + struct qla_work_evt *e, *tmp; + unsigned long flags; + LIST_HEAD(work); - spin_lock_irq(&ha->hardware_lock); - while (!list_empty(&vha->work_list)) { - e = list_entry(vha->work_list.next, struct qla_work_evt, list); + spin_lock_irqsave(&vha->work_lock, flags); + list_splice_init(&vha->work_list, &work); + spin_unlock_irqrestore(&vha->work_lock, flags); + + list_for_each_entry_safe(e, tmp, &work, list) { list_del_init(&e->list); - spin_unlock_irq(&ha->hardware_lock); switch (e->type) { case QLA_EVT_AEN: @@ -2502,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) } if (e->flags & QLA_EVT_FLAG_FREE) kfree(e); - spin_lock_irq(&ha->hardware_lock); } - spin_unlock_irq(&ha->hardware_lock); } + /* Relogins all the fcports of a vport * Context: dpc thread */ diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 152ecfc26cd2..6260505dceb5 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n", - __func__, vha->host_no)); + DEBUG9_10(qla_printk(KERN_WARNING, ha, + "NVRAM didn't go ready...\n")); break; } NVRAM_DELAY(); @@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(qla_printk( + DEBUG9_10(qla_printk(KERN_WARNING, ha, "NVRAM didn't go ready...\n")); break; } @@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { - DEBUG9_10(qla_printk("NVRAM didn't go ready...\n")); + DEBUG9_10(qla_printk(KERN_WARNING, ha, + "NVRAM didn't go ready...\n")); break; } NVRAM_DELAY(); @@ -701,32 +702,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, 
uint32_t flt_addr) break; case FLT_REG_VPD_0: ha->flt_region_vpd_nvram = start; - if (!(PCI_FUNC(ha->pdev->devfn) & 1)) + if (ha->flags.port0) ha->flt_region_vpd = start; break; case FLT_REG_VPD_1: - if (PCI_FUNC(ha->pdev->devfn) & 1) + if (!ha->flags.port0) ha->flt_region_vpd = start; break; case FLT_REG_NVRAM_0: - if (!(PCI_FUNC(ha->pdev->devfn) & 1)) + if (ha->flags.port0) ha->flt_region_nvram = start; break; case FLT_REG_NVRAM_1: - if (PCI_FUNC(ha->pdev->devfn) & 1) + if (!ha->flags.port0) ha->flt_region_nvram = start; break; case FLT_REG_FDT: ha->flt_region_fdt = start; break; case FLT_REG_NPIV_CONF_0: - if (!(PCI_FUNC(ha->pdev->devfn) & 1)) + if (ha->flags.port0) ha->flt_region_npiv_conf = start; break; case FLT_REG_NPIV_CONF_1: - if (PCI_FUNC(ha->pdev->devfn) & 1) + if (!ha->flags.port0) ha->flt_region_npiv_conf = start; break; + case FLT_REG_GOLD_FW: + ha->flt_region_gold_fw = start; + break; } } goto done; @@ -744,12 +748,12 @@ no_flash_data: ha->flt_region_fw = def_fw[def]; ha->flt_region_boot = def_boot[def]; ha->flt_region_vpd_nvram = def_vpd_nvram[def]; - ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ? + ha->flt_region_vpd = ha->flags.port0 ? def_vpd0[def]: def_vpd1[def]; - ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ? + ha->flt_region_nvram = ha->flags.port0 ? def_nvram0[def]: def_nvram1[def]; ha->flt_region_fdt = def_fdt[def]; - ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ? + ha->flt_region_npiv_conf = ha->flags.port0 ? def_npiv_conf0[def]: def_npiv_conf1[def]; done: DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x " @@ -924,6 +928,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) struct fc_vport_identifiers vid; struct fc_vport *vport; + memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); + flags = le16_to_cpu(entry->flags); if (flags == 0xffff) continue; @@ -937,9 +943,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) vid.port_name = wwn_to_u64(entry->port_name); vid.node_name = wwn_to_u64(entry->node_name); - memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry)); - - DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx " + DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx " "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), entry->q_qos, entry->f_qos)); @@ -955,7 +959,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) } done: kfree(data); - ha->npiv_info = NULL; } static int @@ -1079,8 +1082,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, 0xff0000) | ((fdata >> 16) & 0xff)); ret = qla24xx_erase_sector(vha, fdata); if (ret != QLA_SUCCESS) { - DEBUG9(qla_printk("Unable to erase sector: " - "address=%x.\n", faddr)); + DEBUG9(qla_printk(KERN_WARNING, ha, + "Unable to erase sector: address=%x.\n", + faddr)); break; } } @@ -1240,8 +1244,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, ret = qla24xx_write_flash_dword(ha, nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); if (ret != QLA_SUCCESS) { - DEBUG9(qla_printk("Unable to program nvram address=%x " - "data=%x.\n", naddr, *dwptr)); + DEBUG9(qla_printk(KERN_WARNING, ha, + "Unable to program nvram address=%x data=%x.\n", + naddr, *dwptr)); break; } } diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 19d1afc3a343..b63feaf43126 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION 
"8.03.01-k1" +#define QLA2XXX_VERSION "8.03.01-k3" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 166417a6afba..2de5f3ad640b 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target); * @starget: SCSI target pointer * @lun: SCSI Logical Unit Number * - * Description: Looks up the scsi_device with the specified @channel, @id, @lun - * for a given host. The returned scsi_device has an additional reference that + * Description: Looks up the scsi_device with the specified @lun for a given + * @starget. The returned scsi_device has an additional reference that * needs to be released with scsi_device_put once you're done with it. **/ struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget, diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 213123b0486b..41a21772df12 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp, static sector_t get_sdebug_capacity(void) { if (scsi_debug_virtual_gb > 0) - return 2048 * 1024 * scsi_debug_virtual_gb; + return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb; else return sdebug_store_sectors; } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 0c2c73be1974..a1689353d7fd 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd); /** * scsi_eh_restore_cmnd - Restore a scsi command info as part of error recory * @scmd: SCSI command structure to restore - * @ses: saved information from a coresponding call to scsi_prep_eh_cmnd + * @ses: saved information from a coresponding call to scsi_eh_prep_cmnd * - * Undo any damage done by above scsi_prep_eh_cmnd(). + * Undo any damage done by above scsi_eh_prep_cmnd(). */ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) { @@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate) * @sdev: SCSI device to prevent medium removal * * Locking: - * We must be called from process context; scsi_allocate_request() - * may sleep. + * We must be called from process context. * * Notes: * We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the * head of the devices request queue, and continue. - * - * Bugs: - * scsi_allocate_request() may sleep waiting for existing requests to - * be processed. However, since we haven't kicked off any request - * processing for this host, this may deadlock. - * - * If scsi_allocate_request() fails for what ever reason, we - * completely forget to lock the door. */ static void scsi_eh_lock_door(struct scsi_device *sdev) { struct request *req; + /* + * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a + * request becomes available + */ req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); - if (!req) - return; req->cmd[0] = ALLOW_MEDIUM_REMOVAL; req->cmd[1] = 0; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index dd3f9d2b99fd..30f3275e119e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2412,20 +2412,18 @@ int scsi_internal_device_unblock(struct scsi_device *sdev) { struct request_queue *q = sdev->request_queue; - int err; unsigned long flags; /* * Try to transition the scsi device to SDEV_RUNNING * and goose the device queue if successful. 
*/ - err = scsi_device_set_state(sdev, SDEV_RUNNING); - if (err) { - err = scsi_device_set_state(sdev, SDEV_CREATED); - - if (err) - return err; - } + if (sdev->sdev_state == SDEV_BLOCK) + sdev->sdev_state = SDEV_RUNNING; + else if (sdev->sdev_state == SDEV_CREATED_BLOCK) + sdev->sdev_state = SDEV_CREATED; + else + return -EINVAL; spin_lock_irqsave(q->queue_lock, flags); blk_start_queue(q); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index e2b50d8f57a8..c44783801402 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns, "REPORT LUNS maximum number of LUNS received (should be" " between 1 and 16384)"); -static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3; +static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(inq_timeout, "Timeout (in seconds) waiting for devices to answer INQUIRY." - " Default is 5. Some non-compliant devices need more."); + " Default is 20. Some devices may need more; most need less."); /* This lock protects only this list */ static DEFINE_SPINLOCK(async_scan_lock); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 0a2ce7b6325c..f3e664628d7a 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -37,7 +37,6 @@ #define ISCSI_TRANSPORT_VERSION "2.0-870" struct iscsi_internal { - int daemon_pid; struct scsi_transport_template t; struct iscsi_transport *iscsi_transport; struct list_head list; @@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt) } static int -iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp) +iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp) { - return netlink_broadcast(nls, skb, 0, 1, gfp); -} - -static int -iscsi_unicast_skb(struct sk_buff *skb, int pid) -{ - int rc; - - rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT); - if (rc < 0) { - printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc); - return rc; - } - - return 0; + return nlmsg_multicast(nls, skb, 0, group, gfp); } int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, @@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, return -ENOMEM; } - nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = NLMSG_DATA(nlh); memset(ev, 0, sizeof(*ev)); ev->transport_handle = iscsi_handle(conn->transport); @@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, memcpy(pdu, hdr, sizeof(struct iscsi_hdr)); memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size); - return iscsi_unicast_skb(skb, priv->daemon_pid); + return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); } EXPORT_SYMBOL_GPL(iscsi_recv_pdu); +int iscsi_offload_mesg(struct Scsi_Host *shost, + struct iscsi_transport *transport, uint32_t type, + char *data, uint16_t data_size) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + int len = NLMSG_SPACE(sizeof(*ev) + data_size); + + skb = alloc_skb(len, GFP_NOIO); + if (!skb) { + printk(KERN_ERR "can not deliver iscsi offload message:OOM\n"); + return -ENOMEM; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = NLMSG_DATA(nlh); + memset(ev, 0, sizeof(*ev)); + ev->type = type; + ev->transport_handle = iscsi_handle(transport); + switch (type) { + case 
ISCSI_KEVENT_PATH_REQ: + ev->r.req_path.host_no = shost->host_no; + break; + case ISCSI_KEVENT_IF_DOWN: + ev->r.notify_if_down.host_no = shost->host_no; + break; + } + + memcpy((char *)ev + sizeof(*ev), data, data_size); + + return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO); +} +EXPORT_SYMBOL_GPL(iscsi_offload_mesg); + void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) { struct nlmsghdr *nlh; @@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) return; } - nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = NLMSG_DATA(nlh); ev->transport_handle = iscsi_handle(conn->transport); ev->type = ISCSI_KEVENT_CONN_ERROR; @@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) ev->r.connerror.cid = conn->cid; ev->r.connerror.sid = iscsi_conn_get_sid(conn); - iscsi_broadcast_skb(skb, GFP_ATOMIC); + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n", error); @@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) EXPORT_SYMBOL_GPL(iscsi_conn_error_event); static int -iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, - void *payload, int size) +iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, + void *payload, int size) { struct sk_buff *skb; struct nlmsghdr *nlh; @@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi, return -ENOMEM; } - nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0); + nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); nlh->nlmsg_flags = flags; memcpy(NLMSG_DATA(nlh), payload, size); - return iscsi_unicast_skb(skb, pid); + return iscsi_multicast_skb(skb, group, GFP_ATOMIC); } static int @@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) return -ENOMEM; } - nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0, + nlhstat = __nlmsg_put(skbstat, 0, 0, 0, (len - sizeof(*nlhstat)), 0); evstat = NLMSG_DATA(nlhstat); memset(evstat, 0, sizeof(*evstat)); @@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) skb_trim(skbstat, NLMSG_ALIGN(actual_size)); nlhstat->nlmsg_len = actual_size; - err = iscsi_unicast_skb(skbstat, priv->daemon_pid); + err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID, + GFP_ATOMIC); } while (err < 0 && err != -ECONNREFUSED); return err; @@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session, return -ENOMEM; } - nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0); + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); ev = NLMSG_DATA(nlh); ev->transport_handle = iscsi_handle(session->transport); @@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session, * this will occur if the daemon is not up, so we just warn * the user and when the daemon is restarted it will handle it */ - rc = iscsi_broadcast_skb(skb, GFP_KERNEL); + rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL); if (rc == -ESRCH) iscsi_cls_session_printk(KERN_ERR, session, "Cannot notify userspace of session " @@ -1268,26 +1289,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev) return err; } +static int iscsi_if_ep_connect(struct iscsi_transport *transport, + struct 
iscsi_uevent *ev, int msg_type) +{ + struct iscsi_endpoint *ep; + struct sockaddr *dst_addr; + struct Scsi_Host *shost = NULL; + int non_blocking, err = 0; + + if (!transport->ep_connect) + return -EINVAL; + + if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) { + shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no); + if (!shost) { + printk(KERN_ERR "ep connect failed. Could not find " + "host no %u\n", + ev->u.ep_connect_through_host.host_no); + return -ENODEV; + } + non_blocking = ev->u.ep_connect_through_host.non_blocking; + } else + non_blocking = ev->u.ep_connect.non_blocking; + + dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); + ep = transport->ep_connect(shost, dst_addr, non_blocking); + if (IS_ERR(ep)) { + err = PTR_ERR(ep); + goto release_host; + } + + ev->r.ep_connect_ret.handle = ep->id; +release_host: + if (shost) + scsi_host_put(shost); + return err; +} + static int iscsi_if_transport_ep(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type) { struct iscsi_endpoint *ep; - struct sockaddr *dst_addr; int rc = 0; switch (msg_type) { + case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: - if (!transport->ep_connect) - return -EINVAL; - - dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev)); - ep = transport->ep_connect(dst_addr, - ev->u.ep_connect.non_blocking); - if (IS_ERR(ep)) - return PTR_ERR(ep); - - ev->r.ep_connect_ret.handle = ep->id; + rc = iscsi_if_ep_connect(transport, ev, msg_type); break; case ISCSI_UEVENT_TRANSPORT_EP_POLL: if (!transport->ep_poll) @@ -1365,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport, } static int -iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) +iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev) +{ + struct Scsi_Host *shost; + struct iscsi_path *params; + int err; + + if (!transport->set_path) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.set_path.host_no); + if (!shost) { + printk(KERN_ERR "set path could not find host no %u\n", + ev->u.set_path.host_no); + return -ENODEV; + } + + params = (struct iscsi_path *)((char *)ev + sizeof(*ev)); + err = transport->set_path(shost, params); + + scsi_host_put(shost); + return err; +} + +static int +iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; struct iscsi_uevent *ev = NLMSG_DATA(nlh); @@ -1375,6 +1448,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) struct iscsi_cls_conn *conn; struct iscsi_endpoint *ep = NULL; + if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE) + *group = ISCSI_NL_GRP_UIP; + else + *group = ISCSI_NL_GRP_ISCSID; + priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle)); if (!priv) return -EINVAL; @@ -1383,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (!try_module_get(transport->owner)) return -EINVAL; - priv->daemon_pid = NETLINK_CREDS(skb)->pid; - switch (nlh->nlmsg_type) { case ISCSI_UEVENT_CREATE_SESSION: err = iscsi_if_create_session(priv, ep, ev, @@ -1469,6 +1545,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case ISCSI_UEVENT_TRANSPORT_EP_CONNECT: case ISCSI_UEVENT_TRANSPORT_EP_POLL: case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: + case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST: err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type); break; case ISCSI_UEVENT_TGT_DSCVR: @@ -1477,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) case ISCSI_UEVENT_SET_HOST_PARAM: err = 
iscsi_set_host_param(transport, ev); break; + case ISCSI_UEVENT_PATH_UPDATE: + err = iscsi_set_path(transport, ev); + break; default: err = -ENOSYS; break; @@ -1499,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb) uint32_t rlen; struct nlmsghdr *nlh; struct iscsi_uevent *ev; + uint32_t group; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) || @@ -1511,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb) if (rlen > skb->len) rlen = skb->len; - err = iscsi_if_recv_msg(skb, nlh); + err = iscsi_if_recv_msg(skb, nlh, &group); if (err) { ev->type = ISCSI_KEVENT_IF_ERROR; ev->iferror = err; @@ -1525,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb) */ if (ev->type == ISCSI_UEVENT_GET_STATS && !err) break; - err = iscsi_if_send_reply( - NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq, + err = iscsi_if_send_reply(group, nlh->nlmsg_seq, nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); } while (err < 0 && err != -ECONNREFUSED); skb_pull(skb, rlen); @@ -1774,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt) if (!priv) return NULL; INIT_LIST_HEAD(&priv->list); - priv->daemon_pid = -1; priv->iscsi_transport = tt; priv->t.user_scan = iscsi_user_scan; priv->t.create_work_queue = 1; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bcf3bd40bbd5..878b17a9af30 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1902,24 +1902,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) index = sdkp->index; dev = &sdp->sdev_gendev; - if (!sdp->request_queue->rq_timeout) { - if (sdp->type != TYPE_MOD) - blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); - else - blk_queue_rq_timeout(sdp->request_queue, - SD_MOD_TIMEOUT); - } - - device_initialize(&sdkp->dev); - sdkp->dev.parent = &sdp->sdev_gendev; - sdkp->dev.class = &sd_disk_class; - dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev)); - - if (device_add(&sdkp->dev)) - goto out_free_index; - - get_device(&sdp->sdev_gendev); - if (index < SD_MAX_DISKS) { gd->major = sd_major((index & 0xf0) >> 4); gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); @@ -1954,11 +1936,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? 
"removable " : ""); - - return; - - out_free_index: - ida_remove(&sd_index_ida, index); } /** @@ -2026,6 +2003,24 @@ static int sd_probe(struct device *dev) sdkp->openers = 0; sdkp->previous_state = 1; + if (!sdp->request_queue->rq_timeout) { + if (sdp->type != TYPE_MOD) + blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); + else + blk_queue_rq_timeout(sdp->request_queue, + SD_MOD_TIMEOUT); + } + + device_initialize(&sdkp->dev); + sdkp->dev.parent = &sdp->sdev_gendev; + sdkp->dev.class = &sd_disk_class; + dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev)); + + if (device_add(&sdkp->dev)) + goto out_free_index; + + get_device(&sdp->sdev_gendev); + async_schedule(sd_probe_async, sdkp); return 0; @@ -2055,8 +2050,10 @@ static int sd_probe(struct device *dev) **/ static int sd_remove(struct device *dev) { - struct scsi_disk *sdkp = dev_get_drvdata(dev); + struct scsi_disk *sdkp; + async_synchronize_full(); + sdkp = dev_get_drvdata(dev); device_del(&sdkp->dev); del_gendisk(sdkp->disk); sd_shutdown(dev); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 89bd438e1fe3..b33d04250bbc 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon !(STp->use_pf & PF_TESTED)) { /* Try the other possible state of Page Format if not already tried */ - STp->use_pf = !STp->use_pf | PF_TESTED; + STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; st_release_request(SRpnt); SRpnt = NULL; return st_int_ioctl(STp, cmd_in, arg); diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 583966ec8266..45374d66d26a 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev) struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; struct sym_lcb *lp; + unsigned long flags; + int error; if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) return -ENXIO; - tp->starget = sdev->sdev_target; + spin_lock_irqsave(np->s.host->host_lock, flags); + /* * Fail the device init if the device is flagged NOSCAN at BOOT in * the NVRAM. 
This may speed up boot and maintain coherency with @@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev) if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; - starget_printk(KERN_INFO, tp->starget, + starget_printk(KERN_INFO, sdev->sdev_target, "Scan at boot disabled in NVRAM\n"); - return -ENXIO; + error = -ENXIO; + goto out; } if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { - if (sdev->lun != 0) - return -ENXIO; - starget_printk(KERN_INFO, tp->starget, + if (sdev->lun != 0) { + error = -ENXIO; + goto out; + } + starget_printk(KERN_INFO, sdev->sdev_target, "Multiple LUNs disabled in NVRAM\n"); } lp = sym_alloc_lcb(np, sdev->id, sdev->lun); - if (!lp) - return -ENOMEM; + if (!lp) { + error = -ENOMEM; + goto out; + } + if (tp->nlcb == 1) + tp->starget = sdev->sdev_target; spi_min_period(tp->starget) = tp->usr_period; spi_max_width(tp->starget) = tp->usr_width; - return 0; + error = 0; +out: + spin_unlock_irqrestore(np->s.host->host_lock, flags); + + return error; } /* @@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev) static void sym53c8xx_slave_destroy(struct scsi_device *sdev) { struct sym_hcb *np = sym_get_hcb(sdev->host); - struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); + struct sym_tcb *tp = &np->target[sdev->id]; + struct sym_lcb *lp = sym_lp(tp, sdev->lun); + unsigned long flags; + + spin_lock_irqsave(np->s.host->host_lock, flags); + + if (lp->busy_itlq || lp->busy_itl) { + /* + * This really shouldn't happen, but we can't return an error + * so let's try to stop all ongoing I/O. + */ + starget_printk(KERN_WARNING, tp->starget, + "Removing busy LCB (%d)\n", sdev->lun); + sym_reset_scsi_bus(np, 1); + } - if (lp->itlq_tbl) - sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); - kfree(lp->cb_tags); - sym_mfree_dma(lp, sizeof(*lp), "LCB"); + if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) { + /* + * It was the last unit for this target. + */ + tp->head.sval = 0; + tp->head.wval = np->rv_scntl3; + tp->head.uval = 0; + tp->tgoal.check_nego = 1; + tp->starget = NULL; + } + + spin_unlock_irqrestore(np->s.host->host_lock, flags); } /* @@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) if (!((uc->target >> t) & 1)) continue; tp = &np->target[t]; + if (!tp->nlcb) + continue; switch (uc->cmd) { diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index ffa70d1ed182..69ad4945c936 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason) tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; + if (tp->lun0p) + tp->lun0p->to_clear = 0; + if (tp->lunmp) { + int ln; + + for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++) + if (tp->lunmp[ln]) + tp->lunmp[ln]->to_clear = 0; + } } /* @@ -4988,7 +4997,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) */ if (ln && !tp->lunmp) { tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), - GFP_KERNEL); + GFP_ATOMIC); if (!tp->lunmp) goto fail; } @@ -5008,6 +5017,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) tp->lun0p = lp; tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); } + tp->nlcb++; /* * Let the itl task point to error handling. @@ -5085,6 +5095,43 @@ fail: } /* + * LUN control block deallocation. Returns the number of valid remaining LCBs + * for the target. 
+ */ +int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln) +{ + struct sym_tcb *tp = &np->target[tn]; + struct sym_lcb *lp = sym_lp(tp, ln); + + tp->nlcb--; + + if (ln) { + if (!tp->nlcb) { + kfree(tp->lunmp); + sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); + tp->lunmp = NULL; + tp->luntbl = NULL; + tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); + } else { + tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa)); + tp->lunmp[ln] = NULL; + } + } else { + tp->lun0p = NULL; + tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); + } + + if (lp->itlq_tbl) { + sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); + kfree(lp->cb_tags); + } + + sym_mfree_dma(lp, sizeof(*lp), "LCB"); + + return tp->nlcb; +} + +/* * Queue a SCSI IO to the controller. */ int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp) diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h index 9ebc8706b6bf..053e63c86822 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.h +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h @@ -401,6 +401,7 @@ struct sym_tcb { * An array of bus addresses is used on reselection. */ u32 *luntbl; /* LCBs bus address table */ + int nlcb; /* Number of valid LCBs (including LUN #0) */ /* * LUN table used by the C code. @@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); +int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln); int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); int sym_reset_scsi_target(struct sym_hcb *np, int target);
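
The scsi_transport_iscsi.c hunks above drop the per-transport daemon_pid and switch every kernel-to-user notification from netlink unicast/broadcast to nlmsg_multicast() on two groups: ISCSI_NL_GRP_ISCSID for iscsid traffic and ISCSI_NL_GRP_UIP for offload path messages. A minimal userspace sketch of the receiving side follows, assuming the values current at the time of this patch (NETLINK_ISCSI = 8 from linux/netlink.h, ISCSI_NL_GRP_ISCSID = 1 from iscsi_if.h); the buffer size and the absence of real nlmsghdr parsing are illustrative only, not how iscsid is actually structured:

/*
 * Illustrative listener: instead of the kernel remembering a daemon
 * pid, any interested process simply joins the ISCSI_NL_GRP_ISCSID
 * multicast group on a NETLINK_ISCSI socket.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_ISCSI
#define NETLINK_ISCSI		8	/* open-iscsi netlink family */
#endif
#define ISCSI_NL_GRP_ISCSID	1	/* iscsid's multicast group (iscsi_if.h) */

int main(void)
{
	struct sockaddr_nl src;
	char buf[8192];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ISCSI);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&src, 0, sizeof(src));
	src.nl_family = AF_NETLINK;
	/* nl_groups is a bitmask: bit (N - 1) subscribes to group N */
	src.nl_groups = 1 << (ISCSI_NL_GRP_ISCSID - 1);
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0) {
		perror("bind");
		return 1;
	}

	/* Conn errors, session events and PDU indications now arrive
	 * here without any unicast pid bookkeeping in the kernel. */
	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		if (len <= 0)
			break;
		printf("received %zd bytes of netlink event data\n", len);
	}
	close(fd);
	return 0;
}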
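
iscsi_offload_mesg() is the kernel-side producer for the new ISCSI_NL_GRP_UIP group: an offload driver multicasts ISCSI_KEVENT_PATH_REQ, the uIP daemon answers with an ISCSI_UEVENT_PATH_UPDATE message, and iscsi_if_recv_msg() routes that reply to the transport's new set_path() handler. Below is a hedged sketch of a caller; my_transport and my_request_path are hypothetical names, and only iscsi_offload_mesg(), struct iscsi_path and ISCSI_KEVENT_PATH_REQ come from this patch:

#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>

static struct iscsi_transport my_transport;	/* hypothetical transport */

/*
 * Ask the uIP daemon for path information for this host. May sleep
 * (iscsi_offload_mesg() allocates with GFP_NOIO); the answer arrives
 * asynchronously as ISCSI_UEVENT_PATH_UPDATE and is dispatched to
 * my_transport's set_path() callback by iscsi_if_recv_msg().
 */
static int my_request_path(struct Scsi_Host *shost, struct iscsi_path *path)
{
	return iscsi_offload_mesg(shost, &my_transport, ISCSI_KEVENT_PATH_REQ,
				  (char *)path, sizeof(*path));
}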