Diffstat (limited to 'drivers/scsi/megaraid')
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h        |  161
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c   | 1099
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c     |  327
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c |  439
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h |  105
5 files changed, 1628 insertions(+), 503 deletions(-)
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 32166c2c7854..a49914de4b95 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.803.01.00-rc1"
-#define MEGASAS_RELDATE "Mar. 10, 2014"
-#define MEGASAS_EXT_VERSION "Mon. Mar. 10 17:00:00 PDT 2014"
+#define MEGASAS_VERSION "06.805.06.00-rc1"
+#define MEGASAS_RELDATE "Sep. 4, 2014"
+#define MEGASAS_EXT_VERSION "Thu. Sep. 4 17:00:00 PDT 2014"
/*
* Device IDs
@@ -105,6 +105,9 @@
#define MFI_STATE_READY 0xB0000000
#define MFI_STATE_OPERATIONAL 0xC0000000
#define MFI_STATE_FAULT 0xF0000000
+#define MFI_STATE_FORCE_OCR 0x00000080
+#define MFI_STATE_DMADONE 0x00000008
+#define MFI_STATE_CRASH_DUMP_DONE 0x00000004
#define MFI_RESET_REQUIRED 0x00000001
#define MFI_RESET_ADAPTER 0x00000002
#define MEGAMFI_FRAME_SIZE 64
@@ -191,6 +194,9 @@
#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
#define MR_DCMD_PD_LIST_QUERY 0x02010100
+#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100
+#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600)
+
/*
* Global functions
*/
@@ -264,6 +270,25 @@ enum MFI_STAT {
};
/*
+ * Crash dump related defines
+ */
+#define MAX_CRASH_DUMP_SIZE 512
+#define CRASH_DMA_BUF_SIZE (1024 * 1024)
+
+enum MR_FW_CRASH_DUMP_STATE {
+ UNAVAILABLE = 0,
+ AVAILABLE = 1,
+ COPYING = 2,
+ COPIED = 3,
+ COPY_ERROR = 4,
+};
+
+enum _MR_CRASH_BUF_STATUS {
+ MR_CRASH_BUF_TURN_OFF = 0,
+ MR_CRASH_BUF_TURN_ON = 1,
+};
+
+/*
* Number of mailbox bytes in DCMD message frame
*/
#define MFI_MBOX_SIZE 12
@@ -365,7 +390,6 @@ enum MR_LD_QUERY_TYPE {
#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
#define MR_EVT_LD_OFFLINE 0x00fc
#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
-#define MAX_LOGICAL_DRIVES 64
enum MR_PD_STATE {
MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
@@ -443,14 +467,14 @@ struct MR_LD_LIST {
u8 state;
u8 reserved[3];
u64 size;
- } ldList[MAX_LOGICAL_DRIVES];
+ } ldList[MAX_LOGICAL_DRIVES_EXT];
} __packed;
struct MR_LD_TARGETID_LIST {
u32 size;
u32 count;
u8 pad[3];
- u8 targetId[MAX_LOGICAL_DRIVES];
+ u8 targetId[MAX_LOGICAL_DRIVES_EXT];
};
@@ -916,6 +940,15 @@ struct megasas_ctrl_info {
* HA cluster information
*/
struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:26;
+ u32 premiumFeatureMismatch:1;
+ u32 ctrlPropIncompatible:1;
+ u32 fwVersionMismatch:1;
+ u32 hwIncompatible:1;
+ u32 peerIsIncompatible:1;
+ u32 peerIsPresent:1;
+#else
u32 peerIsPresent:1;
u32 peerIsIncompatible:1;
u32 hwIncompatible:1;
@@ -923,6 +956,7 @@ struct megasas_ctrl_info {
u32 ctrlPropIncompatible:1;
u32 premiumFeatureMismatch:1;
u32 reserved:26;
+#endif
} cluster;
char clusterId[16]; /*7D4h */
@@ -933,7 +967,27 @@ struct megasas_ctrl_info {
u8 reserved; /*0x7E7*/
} iov;
- u8 pad[0x800-0x7E8]; /*0x7E8 pad to 2k */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u32 reserved:25;
+ u32 supportCrashDump:1;
+ u32 supportMaxExtLDs:1;
+ u32 supportT10RebuildAssist:1;
+ u32 supportDisableImmediateIO:1;
+ u32 supportThermalPollInterval:1;
+ u32 supportPersonalityChange:2;
+#else
+ u32 supportPersonalityChange:2;
+ u32 supportThermalPollInterval:1;
+ u32 supportDisableImmediateIO:1;
+ u32 supportT10RebuildAssist:1;
+ u32 supportMaxExtLDs:1;
+ u32 supportCrashDump:1;
+ u32 reserved:25;
+#endif
+ } adapterOperations3;
+
+ u8 pad[0x800-0x7EC];
} __packed;
/*
@@ -942,13 +996,12 @@ struct megasas_ctrl_info {
* ===============================
*/
#define MEGASAS_MAX_PD_CHANNELS 2
-#define MEGASAS_MAX_LD_CHANNELS 1
+#define MEGASAS_MAX_LD_CHANNELS 2
#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
MEGASAS_MAX_LD_CHANNELS)
#define MEGASAS_MAX_DEV_PER_CHANNEL 128
#define MEGASAS_DEFAULT_INIT_ID -1
#define MEGASAS_MAX_LUN 8
-#define MEGASAS_MAX_LD 64
#define MEGASAS_DEFAULT_CMD_PER_LUN 256
#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
MEGASAS_MAX_DEV_PER_CHANNEL)
@@ -961,6 +1014,14 @@ struct megasas_ctrl_info {
#define MEGASAS_FW_BUSY 1
+#define VD_EXT_DEBUG 0
+
+enum MR_MFI_MPT_PTHR_FLAGS {
+ MFI_MPT_DETACHED = 0,
+ MFI_LIST_ADDED = 1,
+ MFI_MPT_ATTACHED = 2,
+};
+
/* Frame Type */
#define IO_FRAME 0
#define PTHRU_FRAME 1
@@ -978,7 +1039,7 @@ struct megasas_ctrl_info {
#define MEGASAS_IOCTL_CMD 0
#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
-
+#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
/*
* FW reports the maximum of number of commands that it can accept (maximum
* commands that can be outstanding) at any time. The driver must report a
@@ -1133,13 +1194,19 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:30;
+ u32 reserved:27;
+ u32 support_ndrive_r1_lb:1;
+ u32 support_max_255lds:1;
+ u32 reserved1:1;
u32 support_additional_msix:1;
u32 support_fp_remote_lun:1;
#else
u32 support_fp_remote_lun:1;
u32 support_additional_msix:1;
- u32 reserved:30;
+ u32 reserved1:1;
+ u32 support_max_255lds:1;
+ u32 support_ndrive_r1_lb:1;
+ u32 reserved:27;
#endif
} mfi_capabilities;
u32 reg;
@@ -1559,6 +1626,20 @@ struct megasas_instance {
u32 *reply_queue;
dma_addr_t reply_queue_h;
+ u32 *crash_dump_buf;
+ dma_addr_t crash_dump_h;
+ void *crash_buf[MAX_CRASH_DUMP_SIZE];
+ u32 crash_buf_pages;
+ unsigned int fw_crash_buffer_size;
+ unsigned int fw_crash_state;
+ unsigned int fw_crash_buffer_offset;
+ u32 drv_buf_index;
+ u32 drv_buf_alloc;
+ u32 crash_dump_fw_support;
+ u32 crash_dump_drv_support;
+ u32 crash_dump_app_support;
+ spinlock_t crashdump_lock;
+
struct megasas_register_set __iomem *reg_set;
u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
@@ -1577,7 +1658,7 @@ struct megasas_instance {
struct megasas_cmd **cmd_list;
struct list_head cmd_pool;
/* used to sync fire the cmd to fw */
- spinlock_t cmd_pool_lock;
+ spinlock_t mfi_pool_lock;
/* used to sync fire the cmd to fw */
spinlock_t hba_lock;
/* used to synch producer, consumer ptrs in dpc */
@@ -1606,6 +1687,7 @@ struct megasas_instance {
struct megasas_instance_template *instancet;
struct tasklet_struct isr_tasklet;
struct work_struct work_init;
+ struct work_struct crash_init;
u8 flag;
u8 unload;
@@ -1613,6 +1695,14 @@ struct megasas_instance {
u8 issuepend_done;
u8 disableOnlineCtrlReset;
u8 UnevenSpanSupport;
+
+ u8 supportmax256vd;
+ u16 fw_supported_vd_count;
+ u16 fw_supported_pd_count;
+
+ u16 drv_supported_vd_count;
+ u16 drv_supported_pd_count;
+
u8 adprecovery;
unsigned long last_time;
u32 mfiStatus;
@@ -1622,6 +1712,8 @@ struct megasas_instance {
/* Ptr to hba specific information */
void *ctrl_context;
+ u32 ctrl_context_pages;
+ struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
@@ -1633,8 +1725,6 @@ struct megasas_instance {
struct timer_list sriov_heartbeat_timer;
char skip_heartbeat_timer_del;
u8 requestorId;
- u64 initiator_sas_address;
- u64 ld_sas_address[64];
char PlasmaFW111;
char mpio;
int throttlequeuedepth;
@@ -1661,6 +1751,7 @@ struct MR_LD_VF_AFFILIATION {
/* Plasma 1.11 FW backward compatibility structures */
#define IOV_111_OFFSET 0x7CE
#define MAX_VIRTUAL_FUNCTIONS 8
+#define MR_LD_ACCESS_HIDDEN 15
struct IOV_111 {
u8 maxVFsSupported;
@@ -1754,6 +1845,11 @@ struct megasas_cmd {
struct list_head list;
struct scsi_cmnd *scmd;
+
+ void *mpt_pthr_cmd_blocked;
+ atomic_t mfi_mpt_pthr;
+ u8 is_wait_event;
+
struct megasas_instance *instance;
union {
struct {
@@ -1823,12 +1919,33 @@ u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+ struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo);
+int megasas_get_ctrl_info(struct megasas_instance *instance,
+ struct megasas_ctrl_info *ctrl_info);
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+ u8 crash_buf_state);
+void megasas_free_host_crash_buffer(struct megasas_instance *instance);
+void megasas_fusion_crash_dump_wq(struct work_struct *work);
+
+void megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd);
+int megasas_issue_blocked_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd, int timeout);
+void __megasas_return_cmd(struct megasas_instance *instance,
+ struct megasas_cmd *cmd);
+
+void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
#endif /*LSI_MEGARAID_SAS_H */
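Before moving on to megaraid_sas_base.c, note how the new crash dump plumbing in this header fits together: the firmware dump is staged host-side in up to MAX_CRASH_DUMP_SIZE (512) chunks of CRASH_DMA_BUF_SIZE (1 MB) each, tracked by instance->crash_buf[], and handed to user space through the sysfs handlers added later in this patch. Below is a minimal sketch of the offset-to-chunk arithmetic those handlers rely on, assuming the driver's own headers are in scope; the helper name is illustrative and not part of the patch.

/* Illustrative only: maps a byte offset into the staged firmware
 * crash dump onto instance->crash_buf[], mirroring the arithmetic in
 * megasas_fw_crash_buffer_show().  Each crash_buf[] entry holds one
 * CRASH_DMA_BUF_SIZE (1 MB) chunk; at most MAX_CRASH_DUMP_SIZE (512)
 * chunks, i.e. 512 MB, are retained.
 */
static inline void *megasas_crash_dump_ptr(struct megasas_instance *instance,
					   unsigned long offset)
{
	unsigned long chunk = offset / CRASH_DMA_BUF_SIZE;
	unsigned long rem = offset % CRASH_DMA_BUF_SIZE;

	if (chunk >= MAX_CRASH_DUMP_SIZE)
		return NULL;
	return (char *)instance->crash_buf[chunk] + rem;
}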
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 22a04e37b70a..f6a69a3b1b3f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : 06.803.01.00-rc1
+ * Version : 06.805.06.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -89,6 +89,10 @@ module_param(resetwaittime, int, S_IRUGO);
MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
"before resetting adapter. Default: 180");
+int smp_affinity_enable = 1;
+module_param(smp_affinity_enable, int, S_IRUGO);
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -206,43 +210,66 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
unsigned long flags;
struct megasas_cmd *cmd = NULL;
- spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
if (!list_empty(&instance->cmd_pool)) {
cmd = list_entry((&instance->cmd_pool)->next,
struct megasas_cmd, list);
list_del_init(&cmd->list);
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED);
} else {
printk(KERN_ERR "megasas: Command pool empty!\n");
}
- spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
return cmd;
}
/**
- * megasas_return_cmd - Return a cmd to free command pool
+ * __megasas_return_cmd - Return a cmd to free command pool
* @instance: Adapter soft state
* @cmd: Command packet to be returned to free command pool
*/
inline void
-megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
- unsigned long flags;
-
- spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+ /*
+ * Don't go ahead and free the MFI frame, if corresponding
+ * MPT frame is not freed(valid for only fusion adapters).
+ * In case of MFI adapters, anyways for any allocated MFI
+ * frame will have cmd->mfi_mpt_mpthr set to MFI_MPT_DETACHED
+ */
+ if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED)
+ return;
cmd->scmd = NULL;
cmd->frame_count = 0;
+ cmd->is_wait_event = 0;
+ cmd->mpt_pthr_cmd_blocked = NULL;
+
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
(reset_devices))
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
- list_add_tail(&cmd->list, &instance->cmd_pool);
- spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
+ list_add(&cmd->list, (&instance->cmd_pool)->next);
+}
+
+/**
+ * megasas_return_cmd - Return a cmd to free command pool
+ * @instance: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+inline void
+megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ __megasas_return_cmd(instance, cmd);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
}
@@ -921,13 +948,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
* Used to issue ioctl commands.
*/
-static int
+int
megasas_issue_blocked_cmd(struct megasas_instance *instance,
struct megasas_cmd *cmd, int timeout)
{
int ret = 0;
cmd->cmd_status = ENODATA;
+ cmd->is_wait_event = 1;
instance->instancet->issue_dcmd(instance, cmd);
if (timeout) {
ret = wait_event_timeout(instance->int_cmd_wait_q,
@@ -1536,7 +1564,7 @@ out_return_cmd:
* @done: Callback entry point
*/
static int
-megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
+megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
struct megasas_instance *instance;
unsigned long flags;
@@ -1558,7 +1586,7 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
} else {
spin_unlock_irqrestore(&instance->hba_lock, flags);
scmd->result = DID_NO_CONNECT << 16;
- done(scmd);
+ scmd->scsi_done(scmd);
return 0;
}
}
@@ -1566,7 +1594,7 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
spin_unlock_irqrestore(&instance->hba_lock, flags);
scmd->result = DID_NO_CONNECT << 16;
- done(scmd);
+ scmd->scsi_done(scmd);
return 0;
}
@@ -1577,11 +1605,11 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
spin_unlock_irqrestore(&instance->hba_lock, flags);
- scmd->scsi_done = done;
scmd->result = 0;
if (MEGASAS_IS_LOGICAL(scmd) &&
- (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
+ (scmd->device->id >= instance->fw_supported_vd_count ||
+ scmd->device->lun)) {
scmd->result = DID_BAD_TARGET << 16;
goto out_done;
}
@@ -1606,12 +1634,10 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
return 0;
out_done:
- done(scmd);
+ scmd->scsi_done(scmd);
return 0;
}
-static DEF_SCSI_QCMD(megasas_queue_command)
-
static struct megasas_instance *megasas_lookup_instance(u16 host_no)
{
int i;
@@ -1628,36 +1654,12 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
static int megasas_slave_configure(struct scsi_device *sdev)
{
- u16 pd_index = 0;
- struct megasas_instance *instance ;
-
- instance = megasas_lookup_instance(sdev->host->host_no);
-
- /*
- * Don't export physical disk devices to the disk driver.
- *
- * FIXME: Currently we don't export them to the midlayer at all.
- * That will be fixed once LSI engineers have audited the
- * firmware for possible issues.
- */
- if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
- sdev->type == TYPE_DISK) {
- pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
- sdev->id;
- if (instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) {
- blk_queue_rq_timeout(sdev->request_queue,
- MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
- return 0;
- }
- return -ENXIO;
- }
-
/*
* The RAID firmware may require extended timeouts.
*/
blk_queue_rq_timeout(sdev->request_queue,
MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+
return 0;
}
@@ -1666,18 +1668,15 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
u16 pd_index = 0;
struct megasas_instance *instance ;
instance = megasas_lookup_instance(sdev->host->host_no);
- if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
- (sdev->type == TYPE_DISK)) {
+ if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
/*
* Open the OS scan to the SYSTEM PD
*/
pd_index =
(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
sdev->id;
- if ((instance->pd_list[pd_index].driveState ==
- MR_PD_STATE_SYSTEM) &&
- (instance->pd_list[pd_index].driveType ==
- TYPE_DISK)) {
+ if (instance->pd_list[pd_index].driveState ==
+ MR_PD_STATE_SYSTEM) {
return 0;
}
return -ENXIO;
@@ -1825,16 +1824,12 @@ void megasas_do_ocr(struct megasas_instance *instance)
process_fw_state_change_wq(&instance->work_init);
}
-/* This function will get the current SR-IOV LD/VF affiliation */
-static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
- int initial)
+static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
+ int initial)
{
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
- struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
- dma_addr_t new_affiliation_h;
dma_addr_t new_affiliation_111_h;
int ld, retval = 0;
u8 thisVf;
@@ -1842,15 +1837,15 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
cmd = megasas_get_cmd(instance);
if (!cmd) {
- printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
- "affiliation: Failed to get cmd for scsi%d.\n",
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111:"
+ "Failed to get cmd for scsi%d.\n",
instance->host->host_no);
return -ENOMEM;
}
dcmd = &cmd->frame->dcmd;
- if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
+ if (!instance->vf_affiliation_111) {
printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
"affiliation for scsi%d.\n", instance->host->host_no);
megasas_return_cmd(instance, cmd);
@@ -1858,38 +1853,22 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
}
if (initial)
- if (instance->PlasmaFW111)
memset(instance->vf_affiliation_111, 0,
sizeof(struct MR_LD_VF_AFFILIATION_111));
- else
- memset(instance->vf_affiliation, 0,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION));
else {
- if (instance->PlasmaFW111)
- new_affiliation_111 =
- pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h);
- else
- new_affiliation =
- pci_alloc_consistent(instance->pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h);
- if (!new_affiliation && !new_affiliation_111) {
+ new_affiliation_111 =
+ pci_alloc_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &new_affiliation_111_h);
+ if (!new_affiliation_111) {
printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d.\n",
- instance->host->host_no);
+ instance->host->host_no);
megasas_return_cmd(instance, cmd);
return -ENOMEM;
}
- if (instance->PlasmaFW111)
- memset(new_affiliation_111, 0,
- sizeof(struct MR_LD_VF_AFFILIATION_111));
- else
- memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION));
+ memset(new_affiliation_111, 0,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -1900,34 +1879,17 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
dcmd->flags = MFI_FRAME_DIR_BOTH;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
- if (instance->PlasmaFW111) {
- dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
- dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
- } else {
- dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION);
- dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
- }
+ dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
- if (initial) {
- if (instance->PlasmaFW111)
- dcmd->sgl.sge32[0].phys_addr =
- instance->vf_affiliation_111_h;
- else
- dcmd->sgl.sge32[0].phys_addr =
- instance->vf_affiliation_h;
- } else {
- if (instance->PlasmaFW111)
- dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
- else
- dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
- }
- if (instance->PlasmaFW111)
- dcmd->sgl.sge32[0].length =
- sizeof(struct MR_LD_VF_AFFILIATION_111);
+ if (initial)
+ dcmd->sgl.sge32[0].phys_addr =
+ instance->vf_affiliation_111_h;
else
- dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+
+ dcmd->sgl.sge32[0].length =
+ sizeof(struct MR_LD_VF_AFFILIATION_111);
printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
"scsi%d\n", instance->host->host_no);
@@ -1943,80 +1905,222 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
}
if (!initial) {
- if (instance->PlasmaFW111) {
- if (!new_affiliation_111->vdCount) {
- printk(KERN_WARNING "megasas: SR-IOV: Got new "
- "LD/VF affiliation for passive path "
+ thisVf = new_affiliation_111->thisVf;
+ for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+ if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
+ new_affiliation_111->map[ld].policy[thisVf]) {
+ printk(KERN_WARNING "megasas: SR-IOV: "
+ "Got new LD/VF affiliation "
"for scsi%d.\n",
- instance->host->host_no);
- retval = 1;
- goto out;
- }
- thisVf = new_affiliation_111->thisVf;
- for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
- if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
- printk(KERN_WARNING "megasas: SR-IOV: "
- "Got new LD/VF affiliation "
- "for scsi%d.\n",
- instance->host->host_no);
- memcpy(instance->vf_affiliation_111,
- new_affiliation_111,
- sizeof(struct MR_LD_VF_AFFILIATION_111));
- retval = 1;
- goto out;
- }
- } else {
- if (!new_affiliation->ldCount) {
- printk(KERN_WARNING "megasas: SR-IOV: Got new "
- "LD/VF affiliation for passive "
- "path for scsi%d.\n",
instance->host->host_no);
+ memcpy(instance->vf_affiliation_111,
+ new_affiliation_111,
+ sizeof(struct MR_LD_VF_AFFILIATION_111));
retval = 1;
goto out;
}
- newmap = new_affiliation->map;
- savedmap = instance->vf_affiliation->map;
- thisVf = new_affiliation->thisVf;
- for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
- if (savedmap->policy[thisVf] !=
- newmap->policy[thisVf]) {
- printk(KERN_WARNING "megasas: SR-IOV: "
- "Got new LD/VF affiliation "
- "for scsi%d.\n",
- instance->host->host_no);
- memcpy(instance->vf_affiliation,
- new_affiliation,
- new_affiliation->size);
- retval = 1;
- goto out;
+ }
+out:
+ if (new_affiliation_111) {
+ pci_free_consistent(instance->pdev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ new_affiliation_111,
+ new_affiliation_111_h);
+ }
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+
+ return retval;
+}
+
+static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
+ int initial)
+{
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+ struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+ struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+ dma_addr_t new_affiliation_h;
+ int i, j, retval = 0, found = 0, doscan = 0;
+ u8 thisVf;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation12: "
+ "Failed to get cmd for scsi%d.\n",
+ instance->host->host_no);
+ return -ENOMEM;
+ }
+
+ dcmd = &cmd->frame->dcmd;
+
+ if (!instance->vf_affiliation) {
+ printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+
+ if (initial)
+ memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ else {
+ new_affiliation =
+ pci_alloc_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &new_affiliation_h);
+ if (!new_affiliation) {
+ printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+ "memory for new affiliation for scsi%d.\n",
+ instance->host->host_no);
+ megasas_return_cmd(instance, cmd);
+ return -ENOMEM;
+ }
+ memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION));
+ }
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = MFI_FRAME_DIR_BOTH;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+ dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+
+ if (initial)
+ dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h;
+ else
+ dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+
+ dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION);
+
+ printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+ "scsi%d\n", instance->host->host_no);
+
+ megasas_issue_blocked_cmd(instance, cmd, 0);
+
+ if (dcmd->cmd_status) {
+ printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+ " failed with status 0x%x for scsi%d.\n",
+ dcmd->cmd_status, instance->host->host_no);
+ retval = 1; /* Do a scan if we couldn't get affiliation */
+ goto out;
+ }
+
+ if (!initial) {
+ if (!new_affiliation->ldCount) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+ "affiliation for passive path for scsi%d.\n",
+ instance->host->host_no);
+ retval = 1;
+ goto out;
+ }
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+ thisVf = new_affiliation->thisVf;
+ for (i = 0 ; i < new_affiliation->ldCount; i++) {
+ found = 0;
+ for (j = 0; j < instance->vf_affiliation->ldCount;
+ j++) {
+ if (newmap->ref.targetId ==
+ savedmap->ref.targetId) {
+ found = 1;
+ if (newmap->policy[thisVf] !=
+ savedmap->policy[thisVf]) {
+ doscan = 1;
+ goto out;
+ }
}
savedmap = (struct MR_LD_VF_MAP *)
((unsigned char *)savedmap +
savedmap->size);
+ }
+ if (!found && newmap->policy[thisVf] !=
+ MR_LD_ACCESS_HIDDEN) {
+ doscan = 1;
+ goto out;
+ }
+ newmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)newmap + newmap->size);
+ }
+
+ newmap = new_affiliation->map;
+ savedmap = instance->vf_affiliation->map;
+
+ for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
+ found = 0;
+ for (j = 0 ; j < new_affiliation->ldCount; j++) {
+ if (savedmap->ref.targetId ==
+ newmap->ref.targetId) {
+ found = 1;
+ if (savedmap->policy[thisVf] !=
+ newmap->policy[thisVf]) {
+ doscan = 1;
+ goto out;
+ }
+ }
newmap = (struct MR_LD_VF_MAP *)
((unsigned char *)newmap +
newmap->size);
}
+ if (!found && savedmap->policy[thisVf] !=
+ MR_LD_ACCESS_HIDDEN) {
+ doscan = 1;
+ goto out;
+ }
+ savedmap = (struct MR_LD_VF_MAP *)
+ ((unsigned char *)savedmap +
+ savedmap->size);
}
}
out:
- if (new_affiliation) {
- if (instance->PlasmaFW111)
- pci_free_consistent(instance->pdev,
- sizeof(struct MR_LD_VF_AFFILIATION_111),
- new_affiliation_111,
- new_affiliation_111_h);
- else
- pci_free_consistent(instance->pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- new_affiliation, new_affiliation_h);
+ if (doscan) {
+ printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+ "affiliation for scsi%d.\n", instance->host->host_no);
+ memcpy(instance->vf_affiliation, new_affiliation,
+ new_affiliation->size);
+ retval = 1;
}
- megasas_return_cmd(instance, cmd);
+
+ if (new_affiliation)
+ pci_free_consistent(instance->pdev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ new_affiliation, new_affiliation_h);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return retval;
}
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+ int initial)
+{
+ int retval;
+
+ if (instance->PlasmaFW111)
+ retval = megasas_get_ld_vf_affiliation_111(instance, initial);
+ else
+ retval = megasas_get_ld_vf_affiliation_12(instance, initial);
+ return retval;
+}
+
/* This function will tell FW to start the SR-IOV heartbeat */
int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
int initial)
@@ -2459,7 +2563,12 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
cmd->abort_aen = 0;
instance->aen_cmd = NULL;
- megasas_return_cmd(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
if ((instance->unload == 0) &&
((instance->issuepend_done == 1))) {
@@ -2491,6 +2600,152 @@ static int megasas_change_queue_depth(struct scsi_device *sdev,
return queue_depth;
}
+static ssize_t
+megasas_fw_crash_buffer_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ int val = 0;
+ unsigned long flags;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ instance->fw_crash_buffer_offset = val;
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_buffer_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ u32 size;
+ unsigned long buff_addr;
+ unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long src_addr;
+ unsigned long flags;
+ u32 buff_offset;
+
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ buff_offset = instance->fw_crash_buffer_offset;
+ if (!instance->crash_dump_buf &&
+ !((instance->fw_crash_state == AVAILABLE) ||
+ (instance->fw_crash_state == COPYING))) {
+ dev_err(&instance->pdev->dev,
+ "Firmware crash dump is not available\n");
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return -EINVAL;
+ }
+
+ buff_addr = (unsigned long) buf;
+
+ if (buff_offset >
+ (instance->fw_crash_buffer_size * dmachunk)) {
+ dev_err(&instance->pdev->dev,
+ "Firmware crash dump offset is out of range\n");
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ return 0;
+ }
+
+ size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+
+ src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+ (buff_offset % dmachunk);
+ memcpy(buf, (void *)src_addr, size);
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+
+ return size;
+}
+
+static ssize_t
+megasas_fw_crash_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
+ ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
+}
+
+static ssize_t
+megasas_fw_crash_state_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ int val = 0;
+ unsigned long flags;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+
+ if ((val <= AVAILABLE || val > COPY_ERROR)) {
+ dev_err(&instance->pdev->dev, "application updates invalid "
+ "firmware crash state\n");
+ return -EINVAL;
+ }
+
+ instance->fw_crash_state = val;
+
+ if ((val == COPIED) || (val == COPY_ERROR)) {
+ spin_lock_irqsave(&instance->crashdump_lock, flags);
+ megasas_free_host_crash_buffer(instance);
+ spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+ if (val == COPY_ERROR)
+ dev_info(&instance->pdev->dev, "application failed to "
+ "copy Firmware crash dump\n");
+ else
+ dev_info(&instance->pdev->dev, "Firmware crash dump "
+ "copied successfully\n");
+ }
+ return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_state_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct megasas_instance *instance =
+ (struct megasas_instance *) shost->hostdata;
+ return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
+}
+
+static ssize_t
+megasas_page_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
+}
+
+static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
+ megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
+static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
+ megasas_fw_crash_buffer_size_show, NULL);
+static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
+ megasas_fw_crash_state_show, megasas_fw_crash_state_store);
+static DEVICE_ATTR(page_size, S_IRUGO,
+ megasas_page_size_show, NULL);
+
+struct device_attribute *megaraid_host_attrs[] = {
+ &dev_attr_fw_crash_buffer_size,
+ &dev_attr_fw_crash_buffer,
+ &dev_attr_fw_crash_state,
+ &dev_attr_page_size,
+ NULL,
+};
+
/*
* Scsi host template for megaraid_sas driver
*/
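The four attributes above (fw_crash_buffer, fw_crash_buffer_size, fw_crash_state, page_size) are the whole user-space interface for harvesting a firmware crash dump: the application marks the state COPYING, repeatedly writes a byte offset to fw_crash_buffer and reads back up to a page of data, then writes COPIED (or COPY_ERROR), at which point the store handler frees the host buffer. The following is a rough user-space sketch under stated assumptions — host0, the usual /sys/class/scsi_host/hostN/ location for shost attributes, 4 KB pages, and the numeric state values from megaraid_sas.h — not a tool shipped with the patch.

/* Hypothetical collector; error handling trimmed to the essentials. */
#include <stdio.h>
#include <stdlib.h>

#define SYSFS "/sys/class/scsi_host/host0/"	/* assumption: host0 */

static void write_attr(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), SYSFS "%s", name);
	f = fopen(path, "w");
	if (!f || fputs(val, f) == EOF)
		exit(1);
	fclose(f);
}

int main(void)
{
	char chunk[4096], off[32];	/* assumption: 4 KB PAGE_SIZE */
	unsigned long offset = 0;
	size_t n;
	FILE *buf, *out;

	write_attr("fw_crash_state", "2");		/* COPYING */
	out = fopen("fw_crash.dump", "w");
	if (!out)
		exit(1);

	for (;;) {
		snprintf(off, sizeof(off), "%lu", offset);
		write_attr("fw_crash_buffer", off);	/* select offset */

		buf = fopen(SYSFS "fw_crash_buffer", "r");
		if (!buf)
			break;
		n = fread(chunk, 1, sizeof(chunk), buf);
		fclose(buf);
		if (n == 0)
			break;
		fwrite(chunk, 1, n, out);
		offset += n;
	}

	fclose(out);
	write_attr("fw_crash_state", "3");		/* COPIED */
	return 0;
}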
@@ -2506,6 +2761,7 @@ static struct scsi_host_template megasas_template = {
.eh_bus_reset_handler = megasas_reset_bus_host,
.eh_host_reset_handler = megasas_reset_bus_host,
.eh_timed_out = megasas_reset_timer,
+ .shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
.use_clustering = ENABLE_CLUSTERING,
.change_queue_depth = megasas_change_queue_depth,
@@ -2688,7 +2944,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
"failed, status = 0x%x.\n",
cmd->frame->hdr.cmd_status);
else {
- megasas_return_cmd(instance, cmd);
+ megasas_return_mfi_mpt_pthr(instance,
+ cmd, cmd->mpt_pthr_cmd_blocked);
spin_unlock_irqrestore(
instance->host->host_lock,
flags);
@@ -2696,7 +2953,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
}
} else
instance->map_id++;
- megasas_return_cmd(instance, cmd);
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
/*
* Set fast path IO to ZERO.
@@ -2852,7 +3110,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
unsigned long flags;
defer_index = 0;
- spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
if (cmd->sync_cmd == 1 || cmd->scmd) {
@@ -2873,7 +3131,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
&instance->internal_reset_pending_q);
}
}
- spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
}
@@ -3438,7 +3696,9 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
int j;
u32 max_cmd;
struct megasas_cmd *cmd;
+ struct fusion_context *fusion;
+ fusion = instance->ctrl_context;
max_cmd = instance->max_mfi_cmds;
/*
@@ -3471,13 +3731,11 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
}
}
- /*
- * Add all the commands to command pool (instance->cmd_pool)
- */
for (i = 0; i < max_cmd; i++) {
cmd = instance->cmd_list[i];
memset(cmd, 0, sizeof(struct megasas_cmd));
cmd->index = i;
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
cmd->scmd = NULL;
cmd->instance = instance;
@@ -3548,11 +3806,11 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
- if (!megasas_issue_polled(instance, cmd)) {
- ret = 0;
- } else {
- ret = -1;
- }
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
/*
* the following function will get the instance PD LIST.
@@ -3584,7 +3842,12 @@ megasas_get_pd_list(struct megasas_instance *instance)
pci_free_consistent(instance->pdev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
ci, ci_h);
- megasas_return_cmd(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -3630,6 +3893,8 @@ megasas_get_ld_list(struct megasas_instance *instance)
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ if (instance->supportmax256vd)
+ dcmd->mbox.b[0] = 1;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -3641,18 +3906,19 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0;
- if (!megasas_issue_polled(instance, cmd)) {
- ret = 0;
- } else {
- ret = -1;
- }
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
ld_count = le32_to_cpu(ci->ldCount);
/* the following function will get the instance PD LIST */
- if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
- memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+ if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
+ memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
for (ld_index = 0; ld_index < ld_count; ld_index++) {
if (ci->ldList[ld_index].state != 0) {
@@ -3668,7 +3934,11 @@ megasas_get_ld_list(struct megasas_instance *instance)
ci,
ci_h);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
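This hunk, and the ld_list_query, get_ctrl_info and set_crash_dump_params changes that follow, all switch internal DCMDs from polled mode to blocked (interrupt-driven) completion on fusion controllers once interrupts are unmasked, bounded by the new MEGASAS_BLOCKED_CMD_TIMEOUT. A sketch of that repeated decision factored into one place follows; the helper itself is illustrative, not something the patch adds.

/* Illustrative only: the issue-mode choice repeated in the DCMD paths
 * above and below.  Fusion adapters (ctrl_context set) with interrupts
 * enabled wait for completion; everything else polls the frame status.
 */
static int megasas_issue_internal_dcmd(struct megasas_instance *instance,
				       struct megasas_cmd *cmd)
{
	if (instance->ctrl_context && !instance->mask_interrupts)
		return megasas_issue_blocked_cmd(instance, cmd,
						 MEGASAS_BLOCKED_CMD_TIMEOUT);

	return megasas_issue_polled(instance, cmd);
}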
@@ -3715,6 +3985,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->mbox.b[0] = query_type;
+ if (instance->supportmax256vd)
+ dcmd->mbox.b[2] = 1;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
@@ -3727,16 +3999,15 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->pad_0 = 0;
- if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
- ret = 0;
- } else {
- /* On failure, call older LD list DCMD */
- ret = 1;
- }
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
tgtid_count = le32_to_cpu(ci->count);
- if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
+ if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
ids = ci->targetId[ld_index];
@@ -3748,7 +4019,11 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
ci, ci_h);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -3762,7 +4037,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
* This information is mainly used to find out the maximum IO transfer per
* command supported by the FW.
*/
-static int
+int
megasas_get_ctrl_info(struct megasas_instance *instance,
struct megasas_ctrl_info *ctrl_info)
{
@@ -3803,18 +4078,84 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+ dcmd->mbox.b[0] = 1;
- if (!megasas_issue_polled(instance, cmd)) {
- ret = 0;
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (!ret)
memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
- } else {
- ret = -1;
- }
pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
ci, ci_h);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
+ return ret;
+}
+
+/*
+ * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
+ * to firmware
+ *
+ * @instance: Adapter soft state
+ * @crash_buf_state - tell FW to turn ON/OFF crash dump feature
+ MR_CRASH_BUF_TURN_OFF = 0
+ MR_CRASH_BUF_TURN_ON = 1
+ * @return 0 on success non-zero on failure.
+ * Issues an internal command (DCMD) to set parameters for crash dump feature.
+ * Driver will send address of crash dump DMA buffer and set mbox to tell FW
+ * that driver supports crash dump feature. This DCMD will be sent only if
+ * crash dump feature is supported by the FW.
+ *
+ */
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+ u8 crash_buf_state)
+{
+ int ret = 0;
+ struct megasas_cmd *cmd;
+ struct megasas_dcmd_frame *dcmd;
+
+ cmd = megasas_get_cmd(instance);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
+ return -ENOMEM;
+ }
+
+
+ dcmd = &cmd->frame->dcmd;
+
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ dcmd->mbox.b[0] = crash_buf_state;
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+ dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
+ dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
+ dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
+
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
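The same return-path pattern now closes every internal DCMD helper: on fusion adapters where the MFI frame still has an MPT frame attached (cmd->mpt_pthr_cmd_blocked), both are released through megasas_return_mfi_mpt_pthr(); otherwise the plain megasas_return_cmd() path is used. A sketch of that pattern collected into one helper, again illustrative rather than part of the patch:

/* Illustrative only: the MFI/MPT return pattern used by the DCMD
 * helpers in this file after this patch.
 */
static inline void megasas_return_dcmd(struct megasas_instance *instance,
				       struct megasas_cmd *cmd)
{
	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
		megasas_return_mfi_mpt_pthr(instance, cmd,
					    cmd->mpt_pthr_cmd_blocked);
	else
		megasas_return_cmd(instance, cmd);
}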
@@ -3948,6 +4289,13 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
if (megasas_issue_init_mfi(instance))
goto fail_fw_init;
+ if (megasas_get_ctrl_info(instance, instance->ctrl_info)) {
+ dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
+ "Failing from %s %d\n", instance->unique_id,
+ __func__, __LINE__);
+ goto fail_fw_init;
+ }
+
instance->fw_support_ieee = 0;
instance->fw_support_ieee =
(instance->instancet->read_fw_status_reg(reg_set) &
@@ -3986,7 +4334,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
u32 tmp_sectors, msix_enable, scratch_pad_2;
resource_size_t base_addr;
struct megasas_register_set __iomem *reg_set;
- struct megasas_ctrl_info *ctrl_info;
+ struct megasas_ctrl_info *ctrl_info = NULL;
unsigned long bar_list;
int i, loop, fw_msix_count = 0;
struct IOV_111 *iovPtr;
@@ -4103,17 +4451,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
(unsigned int)num_online_cpus());
for (i = 0; i < instance->msix_vectors; i++)
instance->msixentry[i].entry = i;
- i = pci_enable_msix(instance->pdev, instance->msixentry,
- instance->msix_vectors);
- if (i >= 0) {
- if (i) {
- if (!pci_enable_msix(instance->pdev,
- instance->msixentry, i))
- instance->msix_vectors = i;
- else
- instance->msix_vectors = 0;
- }
- } else
+ i = pci_enable_msix_range(instance->pdev, instance->msixentry,
+ 1, instance->msix_vectors);
+ if (i)
+ instance->msix_vectors = i;
+ else
instance->msix_vectors = 0;
dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
@@ -4123,6 +4465,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
instance->msix_vectors);
}
+ instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
+ GFP_KERNEL);
+ if (instance->ctrl_info == NULL)
+ goto fail_init_adapter;
+
+ /*
+ * Below are default value for legacy Firmware.
+ * non-fusion based controllers
+ */
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
@@ -4145,8 +4498,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
megasas_get_ld_list(instance);
- ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
-
/*
* Compute the max allowed sectors per IO: The controller info has two
* limits on max sectors. Driver should use the minimum of these two.
@@ -4157,58 +4508,79 @@ static int megasas_init_fw(struct megasas_instance *instance)
* to calculate max_sectors_1. So the number ended up as zero always.
*/
tmp_sectors = 0;
- if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
+ ctrl_info = instance->ctrl_info;
- max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
- le16_to_cpu(ctrl_info->max_strips_per_io);
- max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
+ max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+ le16_to_cpu(ctrl_info->max_strips_per_io);
+ max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
- tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+ tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
- /*Check whether controller is iMR or MR */
- if (ctrl_info->memory_size) {
- instance->is_imr = 0;
- dev_info(&instance->pdev->dev, "Controller type: MR,"
- "Memory size is: %dMB\n",
- le16_to_cpu(ctrl_info->memory_size));
- } else {
- instance->is_imr = 1;
- dev_info(&instance->pdev->dev,
- "Controller type: iMR\n");
- }
- /* OnOffProperties are converted into CPU arch*/
- le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
- instance->disableOnlineCtrlReset =
- ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
- /* adapterOperations2 are converted into CPU arch*/
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
- instance->mpio = ctrl_info->adapterOperations2.mpio;
- instance->UnevenSpanSupport =
- ctrl_info->adapterOperations2.supportUnevenSpans;
- if (instance->UnevenSpanSupport) {
- struct fusion_context *fusion = instance->ctrl_context;
- dev_info(&instance->pdev->dev, "FW supports: "
- "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
- if (MR_ValidateMapInfo(instance))
- fusion->fast_path_io = 1;
- else
- fusion->fast_path_io = 0;
+ /*Check whether controller is iMR or MR */
+ if (ctrl_info->memory_size) {
+ instance->is_imr = 0;
+ dev_info(&instance->pdev->dev, "Controller type: MR,"
+ "Memory size is: %dMB\n",
+ le16_to_cpu(ctrl_info->memory_size));
+ } else {
+ instance->is_imr = 1;
+ dev_info(&instance->pdev->dev,
+ "Controller type: iMR\n");
+ }
+ /* OnOffProperties are converted into CPU arch*/
+ le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
+ instance->disableOnlineCtrlReset =
+ ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ /* adapterOperations2 are converted into CPU arch*/
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+ instance->mpio = ctrl_info->adapterOperations2.mpio;
+ instance->UnevenSpanSupport =
+ ctrl_info->adapterOperations2.supportUnevenSpans;
+ if (instance->UnevenSpanSupport) {
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ dev_info(&instance->pdev->dev, "FW supports: "
+ "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
+ if (MR_ValidateMapInfo(instance))
+ fusion->fast_path_io = 1;
+ else
+ fusion->fast_path_io = 0;
+ }
+ if (ctrl_info->host_interface.SRIOV) {
+ if (!ctrl_info->adapterOperations2.activePassive)
+ instance->PlasmaFW111 = 1;
+
+ if (!instance->PlasmaFW111)
+ instance->requestorId =
+ ctrl_info->iov.requestorId;
+ else {
+ iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+ instance->requestorId = iovPtr->requestorId;
}
- if (ctrl_info->host_interface.SRIOV) {
- if (!ctrl_info->adapterOperations2.activePassive)
- instance->PlasmaFW111 = 1;
-
- if (!instance->PlasmaFW111)
- instance->requestorId =
- ctrl_info->iov.requestorId;
- else {
- iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
- instance->requestorId = iovPtr->requestorId;
- }
- printk(KERN_WARNING "megaraid_sas: I am VF "
- "requestorId %d\n", instance->requestorId);
- }
+ dev_warn(&instance->pdev->dev, "I am VF "
+ "requestorId %d\n", instance->requestorId);
+ }
+
+ le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+ instance->crash_dump_fw_support =
+ ctrl_info->adapterOperations3.supportCrashDump;
+ instance->crash_dump_drv_support =
+ (instance->crash_dump_fw_support &&
+ instance->crash_dump_buf);
+ if (instance->crash_dump_drv_support) {
+ dev_info(&instance->pdev->dev, "Firmware Crash dump "
+ "feature is supported\n");
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_OFF);
+
+ } else {
+ if (instance->crash_dump_buf)
+ pci_free_consistent(instance->pdev,
+ CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf,
+ instance->crash_dump_h);
+ instance->crash_dump_buf = NULL;
}
instance->max_sectors_per_req = instance->max_num_sge *
PAGE_SIZE / 512;
@@ -4256,6 +4628,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
fail_init_adapter:
fail_ready_state:
+ kfree(instance->ctrl_info);
+ instance->ctrl_info = NULL;
iounmap(instance->reg_set);
fail_ioremap:
@@ -4351,7 +4725,11 @@ megasas_get_seq_num(struct megasas_instance *instance,
pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
el_info, el_info_h);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return 0;
}
@@ -4634,6 +5012,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
struct Scsi_Host *host;
struct megasas_instance *instance;
u16 control = 0;
+ struct fusion_context *fusion = NULL;
/* Reset MSI-X in the kdump kernel */
if (reset_devices) {
@@ -4694,10 +5073,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
{
- struct fusion_context *fusion;
-
- instance->ctrl_context =
- kzalloc(sizeof(struct fusion_context), GFP_KERNEL);
+ instance->ctrl_context_pages =
+ get_order(sizeof(struct fusion_context));
+ instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
+ instance->ctrl_context_pages);
if (!instance->ctrl_context) {
printk(KERN_DEBUG "megasas: Failed to allocate "
"memory for Fusion context info\n");
@@ -4705,7 +5084,9 @@ static int megasas_probe_one(struct pci_dev *pdev,
}
fusion = instance->ctrl_context;
INIT_LIST_HEAD(&fusion->cmd_pool);
- spin_lock_init(&fusion->cmd_pool_lock);
+ spin_lock_init(&fusion->mpt_pool_lock);
+ memset(fusion->load_balance_info, 0,
+ sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
}
break;
default: /* For all other supported controllers */
@@ -4728,13 +5109,29 @@ static int megasas_probe_one(struct pci_dev *pdev,
break;
}
+ /* Crash dump feature related initialisation*/
+ instance->drv_buf_index = 0;
+ instance->drv_buf_alloc = 0;
+ instance->crash_dump_fw_support = 0;
+ instance->crash_dump_app_support = 0;
+ instance->fw_crash_state = UNAVAILABLE;
+ spin_lock_init(&instance->crashdump_lock);
+ instance->crash_dump_buf = NULL;
+
+ if (!reset_devices)
+ instance->crash_dump_buf = pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
+ if (!instance->crash_dump_buf)
+ dev_err(&instance->pdev->dev, "Can't allocate Firmware "
+ "crash dump DMA buffer\n");
+
megasas_poll_wait_aen = 0;
instance->flag_ieee = 0;
instance->ev = NULL;
instance->issuepend_done = 1;
instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
instance->is_imr = 0;
- megasas_poll_wait_aen = 0;
instance->evt_detail = pci_alloc_consistent(pdev,
sizeof(struct
@@ -4758,7 +5155,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
- spin_lock_init(&instance->cmd_pool_lock);
+ spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->completion_lock);
@@ -4771,13 +5168,14 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->host = host;
instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+ instance->ctrl_info = NULL;
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
instance->flag_ieee = 1;
sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
} else
- sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
+ sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
megasas_dbg_lvl = 0;
instance->flag = 0;
@@ -4789,9 +5187,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
- else
+ INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
+ } else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
/*
@@ -4836,8 +5235,9 @@ retry_irq_register:
printk(KERN_DEBUG "megasas: Failed to "
"register IRQ for vector %d.\n", i);
for (j = 0; j < i; j++) {
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
free_irq(
instance->msixentry[j].vector,
&instance->irq_context[j]);
@@ -4846,11 +5246,14 @@ retry_irq_register:
instance->msix_vectors = 0;
goto retry_irq_register;
}
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev, "Error setting"
- "affinity hint for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev,
+ "Error setting affinity hint "
+ "for cpu %d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
}
} else {
instance->irq_context[0].instance = instance;
@@ -4894,6 +5297,10 @@ retry_irq_register:
goto fail_start_aen;
}
+ /* Get current SR-IOV LD/VF affiliation */
+ if (instance->requestorId)
+ megasas_get_ld_vf_affiliation(instance, 1);
+
return 0;
fail_start_aen:
@@ -4905,8 +5312,9 @@ retry_irq_register:
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
}
@@ -4979,7 +5387,11 @@ static void megasas_flush_cache(struct megasas_instance *instance)
dev_err(&instance->pdev->dev, "Command timedout"
" from %s\n", __func__);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return;
}
@@ -5026,7 +5438,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
dev_err(&instance->pdev->dev, "Command timedout"
"from %s\n", __func__);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return;
}
@@ -5069,8 +5485,9 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
}
@@ -5132,9 +5549,10 @@ megasas_resume(struct pci_dev *pdev)
goto fail_ready_state;
/* Now re-enable MSI-X */
- if (instance->msix_vectors)
- pci_enable_msix(instance->pdev, instance->msixentry,
- instance->msix_vectors);
+ if (instance->msix_vectors &&
+ pci_enable_msix_exact(instance->pdev, instance->msixentry,
+ instance->msix_vectors))
+ goto fail_reenable_msix;
switch (instance->pdev->device) {
case PCI_DEVICE_ID_LSI_FUSION:
@@ -5178,8 +5596,9 @@ megasas_resume(struct pci_dev *pdev)
printk(KERN_DEBUG "megasas: Failed to "
"register IRQ for vector %d.\n", i);
for (j = 0; j < i; j++) {
- irq_set_affinity_hint(
- instance->msixentry[j].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[j].vector, NULL);
free_irq(
instance->msixentry[j].vector,
&instance->irq_context[j]);
@@ -5187,11 +5606,14 @@ megasas_resume(struct pci_dev *pdev)
goto fail_irq;
}
- if (irq_set_affinity_hint(instance->msixentry[i].vector,
- get_cpu_mask(cpu)))
- dev_err(&instance->pdev->dev, "Error setting"
- "affinity hint for cpu %d\n", cpu);
- cpu = cpumask_next(cpu, cpu_online_mask);
+ if (smp_affinity_enable) {
+ if (irq_set_affinity_hint(instance->msixentry[i].vector,
+ get_cpu_mask(cpu)))
+ dev_err(&instance->pdev->dev, "Error "
+ "setting affinity hint for cpu "
+ "%d\n", cpu);
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
}
} else {
instance->irq_context[0].instance = instance;
@@ -5243,6 +5665,7 @@ fail_init_mfi:
fail_set_dma_mask:
fail_ready_state:
+fail_reenable_msix:
pci_disable_device(pdev);
@@ -5273,6 +5696,8 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
+ if (instance->fw_crash_state != UNAVAILABLE)
+ megasas_free_host_crash_buffer(instance);
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -5306,8 +5731,9 @@ static void megasas_detach_one(struct pci_dev *pdev)
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
}
@@ -5322,14 +5748,18 @@ static void megasas_detach_one(struct pci_dev *pdev)
case PCI_DEVICE_ID_LSI_INVADER:
case PCI_DEVICE_ID_LSI_FURY:
megasas_release_fusion(instance);
- for (i = 0; i < 2 ; i++)
+ for (i = 0; i < 2 ; i++) {
if (fusion->ld_map[i])
dma_free_coherent(&instance->pdev->dev,
- fusion->map_sz,
+ fusion->max_map_sz,
fusion->ld_map[i],
- fusion->
- ld_map_phys[i]);
- kfree(instance->ctrl_context);
+ fusion->ld_map_phys[i]);
+ if (fusion->ld_drv_map[i])
+ free_pages((ulong)fusion->ld_drv_map[i],
+ fusion->drv_map_pages);
+ }
+ free_pages((ulong)instance->ctrl_context,
+ instance->ctrl_context_pages);
break;
default:
megasas_release_mfi(instance);
@@ -5342,6 +5772,8 @@ static void megasas_detach_one(struct pci_dev *pdev)
break;
}
+ kfree(instance->ctrl_info);
+
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail, instance->evt_detail_h);
@@ -5363,6 +5795,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
instance->hb_host_mem,
instance->hb_host_mem_h);
+ if (instance->crash_dump_buf)
+ pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf, instance->crash_dump_h);
+
scsi_host_put(host);
pci_disable_device(pdev);
@@ -5385,8 +5821,9 @@ static void megasas_shutdown(struct pci_dev *pdev)
instance->instancet->disable_intr(instance);
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
- irq_set_affinity_hint(
- instance->msixentry[i].vector, NULL);
+ if (smp_affinity_enable)
+ irq_set_affinity_hint(
+ instance->msixentry[i].vector, NULL);
free_irq(instance->msixentry[i].vector,
&instance->irq_context[i]);
}
@@ -5448,12 +5885,53 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
spin_lock_irqsave(&poll_aen_lock, flags);
if (megasas_poll_wait_aen)
mask = (POLLIN | POLLRDNORM);
+
else
mask = 0;
+ megasas_poll_wait_aen = 0;
spin_unlock_irqrestore(&poll_aen_lock, flags);
return mask;
}
+/*
+ * megasas_set_crash_dump_params_ioctl:
+ * Send CRASH_DUMP_MODE DCMD to all controllers
+ * @cmd: MFI command frame
+ */
+
+static int megasas_set_crash_dump_params_ioctl(
+ struct megasas_cmd *cmd)
+{
+ struct megasas_instance *local_instance;
+ int i, error = 0;
+ int crash_support;
+
+ crash_support = cmd->frame->dcmd.mbox.w[0];
+
+ for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+ local_instance = megasas_mgmt_info.instance[i];
+ if (local_instance && local_instance->crash_dump_drv_support) {
+ if ((local_instance->adprecovery ==
+ MEGASAS_HBA_OPERATIONAL) &&
+ !megasas_set_crash_dump_params(local_instance,
+ crash_support)) {
+ local_instance->crash_dump_app_support =
+ crash_support;
+ dev_info(&local_instance->pdev->dev,
+ "Application firmware crash "
+ "dump mode set success\n");
+ error = 0;
+ } else {
+ dev_info(&local_instance->pdev->dev,
+ "Application firmware crash "
+ "dump mode set failed\n");
+ error = -1;
+ }
+ }
+ }
+ return error;
+}
+
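/*
 * The per-instance helper megasas_set_crash_dump_params() used above lives
 * in another part of this patch and is not visible in this hunk.  The block
 * below is only a minimal, hedged sketch of what such a DCMD issuer looks
 * like; the exact frame fields and timeout are assumptions based on the
 * surrounding driver code, not a copy of the real function.
 */
#if 0	/* illustrative sketch only, not part of the patch */
static int example_set_crash_dump_params(struct megasas_instance *instance,
					 u8 crash_buf_state)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->mbox.b[0] = crash_buf_state;	/* MR_CRASH_BUF_TURN_ON/OFF */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
	/* single SGE pointing at the 1 MB DMA crash buffer */
	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
	dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);

	megasas_issue_blocked_cmd(instance, cmd, MEGASAS_BLOCKED_CMD_TIMEOUT);
	megasas_return_cmd(instance, cmd);
	return 0;
}
#endif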
/**
* megasas_mgmt_fw_ioctl - Issues management ioctls to FW
* @instance: Adapter soft state
@@ -5500,6 +5978,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
MFI_FRAME_SGL64 |
MFI_FRAME_SENSE64));
+ if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
+ error = megasas_set_crash_dump_params_ioctl(cmd);
+ megasas_return_cmd(instance, cmd);
+ return error;
+ }
+
/*
* The management interface between applications and the fw uses
* MFI frames. E.g, RAID configuration changes, LD property changes
@@ -5619,9 +6103,14 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
le32_to_cpu(kern_sge32[i].length),
kbuff_arr[i],
le32_to_cpu(kern_sge32[i].phys_addr));
+ kbuff_arr[i] = NULL;
}
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return error;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 081bfff12d00..685e6f391fe4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -55,6 +55,13 @@
#include "megaraid_sas.h"
#include <asm/div64.h>
+#define LB_PENDING_CMDS_DEFAULT 4
+static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+module_param(lb_pending_cmds, int, S_IRUGO);
+MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
+ "threshold. Valid Values are 1-128. Default: 4");
+
+
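/*
 * lb_pending_cmds is read-only once loaded (S_IRUGO) and is only
 * range-checked lazily in mr_update_load_balance_params() further down in
 * this file, where out-of-range values are silently reset to
 * LB_PENDING_CMDS_DEFAULT.  A hypothetical load-time override would be:
 *     modprobe megaraid_sas lb_pending_cmds=8
 */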
#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
#define MR_LD_STATE_OPTIMAL 3
#define FALSE 0
@@ -66,16 +73,13 @@
#define SPAN_INVALID 0xff
/* Prototypes */
-void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
- struct LD_LOAD_BALANCE_INFO *lbInfo);
-
-static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
PLD_SPAN_INFO ldSpanInfo);
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
- struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
+ struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
- u64 strip, struct MR_FW_RAID_MAP_ALL *map);
+ u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
u32 mega_mod64(u64 dividend, u32 divisor)
{
@@ -109,94 +113,183 @@ u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
return d;
}
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
return &map->raidMap.ldSpanMap[ld].ldRaid;
}
static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
- struct MR_FW_RAID_MAP_ALL
+ struct MR_DRV_RAID_MAP_ALL
*map)
{
return &map->raidMap.ldSpanMap[ld].spanBlock[0];
}
-static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
+static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
}
-u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
{
return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
}
-u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
{
return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
}
-u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.devHndlInfo[pd].curDevHdl;
}
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
{
return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
}
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
{
return map->raidMap.ldTgtIdToLd[ldTgtId];
}
static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
}
/*
+ * This function will Populate Driver Map using firmware raid map
+ */
+void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion = instance->ctrl_context;
+ struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
+ struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
+ int i;
+
+
+ struct MR_DRV_RAID_MAP_ALL *drv_map =
+ fusion->ld_drv_map[(instance->map_id & 1)];
+ struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
+ if (instance->supportmax256vd) {
+ memcpy(fusion->ld_drv_map[instance->map_id & 1],
+ fusion->ld_map[instance->map_id & 1],
+ fusion->current_map_sz);
+ /* New Raid map will not set totalSize, so keep expected value
+ * for legacy code in ValidateMapInfo
+ */
+ pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT);
+ } else {
+ fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
+ fusion->ld_map[(instance->map_id & 1)];
+ pFwRaidMap = &fw_map_old->raidMap;
+
+#if VD_EXT_DEBUG
+ for (i = 0; i < pFwRaidMap->ldCount; i++) {
+ dev_dbg(&instance->pdev->dev, "(%d) :Index 0x%x "
+ "Target Id 0x%x Seq Num 0x%x Size 0/%llx\n",
+ instance->unique_id, i,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
+ fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
+ }
+#endif
+
+ memset(drv_map, 0, fusion->drv_map_sz);
+ pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+ pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
+ pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+ for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
+ pDrvRaidMap->ldTgtIdToLd[i] =
+ (u8)pFwRaidMap->ldTgtIdToLd[i];
+ for (i = 0; i < pDrvRaidMap->ldCount; i++) {
+ pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
+#if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+ "size 0x%x\n", i, i,
+ pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ dev_dbg(&instance->pdev->dev,
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+ "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+ "size 0x%x\n", i, i,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
+ pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
+ (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+ dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
+ "raid map %p LD RAID MAP %p/%p\n", drv_map,
+ pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
+ &pDrvRaidMap->ldSpanMap[i].ldRaid);
+#endif
+ }
+ memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
+ sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
+ memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
+ sizeof(struct MR_DEV_HANDLE_INFO) *
+ MAX_RAIDMAP_PHYSICAL_DEVICES);
+ }
+}
+
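/*
 * Both the firmware map (ld_map[]) and the driver map (ld_drv_map[])
 * populated above are double buffered and selected by the parity of
 * instance->map_id, which is bumped on each successful map refresh:
 *
 *     fusion->ld_drv_map[instance->map_id & 1]      newest driver copy
 *     fusion->ld_map[(instance->map_id - 1) & 1]    previous firmware copy,
 *         reused by megasas_sync_map_info() as the MR_LD_TARGET_SYNC target
 */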
+/*
* This function will validate Map info data provided by FW
*/
u8 MR_ValidateMapInfo(struct megasas_instance *instance)
{
- struct fusion_context *fusion = instance->ctrl_context;
- struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
- struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
- PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
- struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+ struct MR_DRV_RAID_MAP *pDrvRaidMap;
+ struct LD_LOAD_BALANCE_INFO *lbInfo;
+ PLD_SPAN_INFO ldSpanInfo;
struct MR_LD_RAID *raid;
int ldCount, num_lds;
u16 ld;
+ u32 expected_size;
+
+
+ MR_PopulateDrvRaidMap(instance);
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ pDrvRaidMap = &drv_map->raidMap;
+ lbInfo = fusion->load_balance_info;
+ ldSpanInfo = fusion->log_to_span;
- if (le32_to_cpu(pFwRaidMap->totalSize) !=
- (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
- printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
- (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
- sizeof(struct MR_LD_SPAN_MAP)) +
- (sizeof(struct MR_LD_SPAN_MAP) *
- le32_to_cpu(pFwRaidMap->ldCount))));
- printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
- ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
- le32_to_cpu(pFwRaidMap->totalSize));
+ if (instance->supportmax256vd)
+ expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
+ else
+ expected_size =
+ (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
+ (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount)));
+
+ if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
+ dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
+ (unsigned int) expected_size);
+ dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
+ (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
+ le32_to_cpu(pDrvRaidMap->totalSize));
return 0;
}
if (instance->UnevenSpanSupport)
- mr_update_span_set(map, ldSpanInfo);
+ mr_update_span_set(drv_map, ldSpanInfo);
- mr_update_load_balance_params(map, lbInfo);
+ mr_update_load_balance_params(drv_map, lbInfo);
- num_lds = le32_to_cpu(map->raidMap.ldCount);
+ num_lds = le32_to_cpu(drv_map->raidMap.ldCount);
/*Convert Raid capability values to CPU arch */
for (ldCount = 0; ldCount < num_lds; ldCount++) {
- ld = MR_TargetIdToLdGet(ldCount, map);
- raid = MR_LdRaidGet(ld, map);
+ ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ raid = MR_LdRaidGet(ld, drv_map);
le32_to_cpus((u32 *)&raid->capability);
}
@@ -204,7 +297,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
}
u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
struct MR_QUAD_ELEMENT *quad;
@@ -246,7 +339,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
* ldSpanInfo - ldSpanInfo per HBA instance
*/
#if SPAN_DEBUG
-static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
{
u8 span;
@@ -257,9 +351,9 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
int ldCount;
u16 ld;
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES)
+ if (ld >= MAX_LOGICAL_DRIVES_EXT)
continue;
raid = MR_LdRaidGet(ld, map);
dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
@@ -339,7 +433,7 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
*/
u32 mr_spanset_get_span_block(struct megasas_instance *instance,
- u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
+ u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -402,7 +496,7 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
*/
static u64 get_row_from_strip(struct megasas_instance *instance,
- u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+ u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -471,7 +565,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
*/
static u64 get_strip_from_row(struct megasas_instance *instance,
- u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
+ u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -532,7 +626,7 @@ static u64 get_strip_from_row(struct megasas_instance *instance,
*/
static u32 get_arm_from_strip(struct megasas_instance *instance,
- u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+ u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
{
struct fusion_context *fusion = instance->ctrl_context;
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -580,7 +674,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
/* This Function will return Phys arm */
u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
/* Need to check correct default value */
@@ -624,7 +718,7 @@ u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
u32 pd, arRef;
@@ -682,6 +776,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm;
+ io_info->span_arm = pRAID_Context->spanArm;
return retval;
}
@@ -705,7 +800,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
u16 stripRef, struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map)
+ struct MR_DRV_RAID_MAP_ALL *map)
{
struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
u32 pd, arRef;
@@ -778,6 +873,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
physArm;
+ io_info->span_arm = pRAID_Context->spanArm;
return retval;
}
@@ -794,7 +890,7 @@ u8
MR_BuildRaidContext(struct megasas_instance *instance,
struct IO_REQUEST_INFO *io_info,
struct RAID_CONTEXT *pRAID_Context,
- struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
+ struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
{
struct MR_LD_RAID *raid;
u32 ld, stripSize, stripe_mask;
@@ -1043,8 +1139,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
* ldSpanInfo - ldSpanInfo per HBA instance
*
*/
-void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
- PLD_SPAN_INFO ldSpanInfo)
+void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+ PLD_SPAN_INFO ldSpanInfo)
{
u8 span, count;
u32 element, span_row_width;
@@ -1056,9 +1152,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
u16 ld;
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES)
+ if (ld >= MAX_LOGICAL_DRIVES_EXT)
continue;
raid = MR_LdRaidGet(ld, map);
for (element = 0; element < MAX_QUAD_DEPTH; element++) {
@@ -1152,90 +1248,105 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
}
-void
-mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
- struct LD_LOAD_BALANCE_INFO *lbInfo)
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
+ struct LD_LOAD_BALANCE_INFO *lbInfo)
{
int ldCount;
u16 ld;
struct MR_LD_RAID *raid;
- for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
- ld = MR_TargetIdToLdGet(ldCount, map);
- if (ld >= MAX_LOGICAL_DRIVES) {
+ if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
+ lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+
+ for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+ ld = MR_TargetIdToLdGet(ldCount, drv_map);
+ if (ld >= MAX_LOGICAL_DRIVES_EXT) {
lbInfo[ldCount].loadBalanceFlag = 0;
continue;
}
- raid = MR_LdRaidGet(ld, map);
-
- /* Two drive Optimal RAID 1 */
- if ((raid->level == 1) && (raid->rowSize == 2) &&
- (raid->spanDepth == 1) && raid->ldState ==
- MR_LD_STATE_OPTIMAL) {
- u32 pd, arRef;
-
- lbInfo[ldCount].loadBalanceFlag = 1;
-
- /* Get the array on which this span is present */
- arRef = MR_LdSpanArrayGet(ld, 0, map);
-
- /* Get the Pd */
- pd = MR_ArPdGet(arRef, 0, map);
- /* Get dev handle from Pd */
- lbInfo[ldCount].raid1DevHandle[0] =
- MR_PdDevHandleGet(pd, map);
- /* Get the Pd */
- pd = MR_ArPdGet(arRef, 1, map);
-
- /* Get the dev handle from Pd */
- lbInfo[ldCount].raid1DevHandle[1] =
- MR_PdDevHandleGet(pd, map);
- } else
+ raid = MR_LdRaidGet(ld, drv_map);
+ if ((raid->level != 1) ||
+ (raid->ldState != MR_LD_STATE_OPTIMAL)) {
lbInfo[ldCount].loadBalanceFlag = 0;
+ continue;
+ }
+ lbInfo[ldCount].loadBalanceFlag = 1;
}
}
-u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
- u32 count)
+u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
- u16 pend0, pend1;
+ struct fusion_context *fusion;
+ struct MR_LD_RAID *raid;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
+ u16 pend0, pend1, ld;
u64 diff0, diff1;
- u8 bestArm;
+ u8 bestArm, pd0, pd1, span, arm;
+ u32 arRef, span_row_size;
+
+ u64 block = io_info->ldStartBlock;
+ u32 count = io_info->numBlocks;
+
+ span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
+ >> RAID_CTX_SPANARM_SPAN_SHIFT);
+ arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
+
+
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
+ raid = MR_LdRaidGet(ld, drv_map);
+ span_row_size = instance->UnevenSpanSupport ?
+ SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
+
+ arRef = MR_LdSpanArrayGet(ld, span, drv_map);
+ pd0 = MR_ArPdGet(arRef, arm, drv_map);
+ pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
+ (arm + 1 - span_row_size) : arm + 1, drv_map);
/* get the pending cmds for the data and mirror arms */
- pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
- pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
+ pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+ pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
/* Determine the disk whose head is nearer to the req. block */
- diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
- diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
- bestArm = (diff0 <= diff1 ? 0 : 1);
+ diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+ diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+ bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
- /*Make balance count from 16 to 4 to keep driver in sync with Firmware*/
- if ((bestArm == arm && pend0 > pend1 + 4) ||
- (bestArm != arm && pend1 > pend0 + 4))
+ if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
+ (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
bestArm ^= 1;
/* Update the last accessed block on the correct pd */
- lbInfo->last_accessed_block[bestArm] = block + count - 1;
-
- return bestArm;
+ io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+ lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
+ io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+#if SPAN_DEBUG
+ if (arm != bestArm)
+ dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
+ "occur - span 0x%x arm 0x%x bestArm 0x%x "
+ "io_info->span_arm 0x%x\n",
+ span, arm, bestArm, io_info->span_arm);
+#endif
+ return io_info->pd_after_lb;
}
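/*
 * Worked example of the arm selection above, using hypothetical numbers:
 *
 *   arm = 0, span_row_size = 2, lb_pending_cmds = 4
 *   last_accessed_block[pd0] = 1000, last_accessed_block[pd1] = 9000
 *   request: block = 1200, count = 8
 *
 *   diff0 = |1200 - 1000| = 200, diff1 = |1200 - 9000| = 7800
 *   so bestArm starts as arm (pd0's head is nearer).
 *
 *   If pend0 = 10 and pend1 = 3, then pend0 > pend1 + 4 and bestArm is
 *   flipped to the mirror arm: the read goes to pd1, pd_after_lb = pd1 and
 *   last_accessed_block[pd1] becomes 1200 + 8 - 1 = 1207.
 */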
-u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
- struct IO_REQUEST_INFO *io_info)
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+ struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
{
- u8 arm, old_arm;
+ u8 arm_pd;
u16 devHandle;
+ struct fusion_context *fusion;
+ struct MR_DRV_RAID_MAP_ALL *drv_map;
- old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
-
- /* get best new arm */
- arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
- io_info->numBlocks);
- devHandle = lbInfo->raid1DevHandle[arm];
- atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
+ fusion = instance->ctrl_context;
+ drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+ /* get best new arm (PD ID) */
+ arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info);
+ devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+ atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
return devHandle;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 3ed03dfab76c..f37eed682c75 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -50,6 +50,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"
@@ -76,8 +77,6 @@ megasas_issue_polled(struct megasas_instance *instance,
void
megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
-u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
- struct IO_REQUEST_INFO *in_info);
int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
void megaraid_sas_kill_hba(struct megasas_instance *instance);
@@ -91,6 +90,8 @@ void megasas_start_timer(struct megasas_instance *instance,
extern struct megasas_mgmt_info megasas_mgmt_info;
extern int resetwaittime;
+
+
/**
* megasas_enable_intr_fusion - Enables interrupts
* @regs: MFI register set
@@ -163,7 +164,7 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
(struct fusion_context *)instance->ctrl_context;
struct megasas_cmd_fusion *cmd = NULL;
- spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+ spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
if (!list_empty(&fusion->cmd_pool)) {
cmd = list_entry((&fusion->cmd_pool)->next,
@@ -173,7 +174,7 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
}
- spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+ spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
return cmd;
}
@@ -182,21 +183,47 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
* @instance: Adapter soft state
* @cmd: Command packet to be returned to free command pool
*/
-static inline void
-megasas_return_cmd_fusion(struct megasas_instance *instance,
- struct megasas_cmd_fusion *cmd)
+inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd)
{
unsigned long flags;
struct fusion_context *fusion =
(struct fusion_context *)instance->ctrl_context;
- spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+ spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
cmd->scmd = NULL;
cmd->sync_cmd_idx = (u32)ULONG_MAX;
- list_add_tail(&cmd->list, &fusion->cmd_pool);
+ list_add(&cmd->list, (&fusion->cmd_pool)->next);
- spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+ spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
+}
+
+/**
+ * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool
+ * @instance: Adapter soft state
+ * @cmd_mfi: MFI Command packet to be returned to free command pool
+ * @cmd_mpt: MPT Command packet to be returned to free command pool
+ */
+inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+ struct megasas_cmd *cmd_mfi,
+ struct megasas_cmd_fusion *cmd_fusion)
+{
+ unsigned long flags;
+
+ /*
+ * TODO: optimize this code to use only one lock instead of the two
+ * locks used currently; mpt_pool_lock is acquired
+ * inside mfi_pool_lock
+ */
+ spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+ megasas_return_cmd_fusion(instance, cmd_fusion);
+ if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED)
+ dev_err(&instance->pdev->dev, "Possible bug from %s %d\n",
+ __func__, __LINE__);
+ atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED);
+ __megasas_return_cmd(instance, cmd_mfi);
+ spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
}
/**
@@ -562,9 +589,11 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
{
int i;
struct megasas_header *frame_hdr = &cmd->frame->hdr;
+ struct fusion_context *fusion;
u32 msecs = seconds * 1000;
+ fusion = instance->ctrl_context;
/*
* Wait for cmd_status to change
*/
@@ -573,8 +602,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
msleep(20);
}
- if (frame_hdr->cmd_status == 0xff)
+ if (frame_hdr->cmd_status == 0xff) {
+ if (fusion)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
return -ETIME;
+ }
return 0;
}
@@ -650,6 +683,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
/* driver supports HA / Remote LUN over Fast Path interface */
init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
= 1;
+ init_frame->driver_operations.mfi_capabilities.support_max_255lds
+ = 1;
+ init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb
+ = 1;
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -709,6 +746,13 @@ fail_get_cmd:
* Issues an internal command (DCMD) to get the FW's controller PD
* list structure. This information is mainly used to find out SYSTEM
* supported by the FW.
+ * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
+ * dcmd.mbox.b[0] - number of LDs being sync'd
+ * dcmd.mbox.b[1] - 0 - complete command immediately.
+ * - 1 - pend till config change
+ * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
+ * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
+ * uses extended struct MR_FW_RAID_MAP_EXT
*/
static int
megasas_get_ld_map_info(struct megasas_instance *instance)
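/*
 * The mbox byte layout documented above is consumed in
 * build_mpt_mfi_pass_thru(), where mbox.b[1] == 1 marks the DCMD as a
 * blocked (pended) command.  A hypothetical caller-side fill, shown only to
 * make the layout concrete; the real sync path sets these bytes in
 * megasas_sync_map_info(), which is not fully visible in this diff:
 */
#if 0	/* illustrative sketch only */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->mbox.b[0] = num_lds;	/* number of LDs being sync'd */
	dcmd->mbox.b[1] = 1;		/* pend until a config change */
	dcmd->mbox.b[2] = instance->supportmax256vd ? 1 : 0;
					/* 1 selects MR_FW_RAID_MAP_EXT */
#endif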
@@ -716,7 +760,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
int ret = 0;
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
- struct MR_FW_RAID_MAP_ALL *ci;
+ void *ci;
dma_addr_t ci_h = 0;
u32 size_map_info;
struct fusion_context *fusion;
@@ -737,10 +781,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- size_map_info = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+ size_map_info = fusion->current_map_sz;
- ci = fusion->ld_map[(instance->map_id & 1)];
+ ci = (void *) fusion->ld_map[(instance->map_id & 1)];
ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
if (!ci) {
@@ -749,9 +792,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
return -ENOMEM;
}
- memset(ci, 0, sizeof(*ci));
+ memset(ci, 0, fusion->max_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
+#if VD_EXT_DEBUG
+ dev_dbg(&instance->pdev->dev,
+ "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
+ __func__, cpu_to_le32(size_map_info));
+#endif
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -763,14 +810,17 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
- if (!megasas_issue_polled(instance, cmd))
- ret = 0;
- else {
- printk(KERN_ERR "megasas: Get LD Map Info Failed\n");
- ret = -1;
- }
+ if (instance->ctrl_context && !instance->mask_interrupts)
+ ret = megasas_issue_blocked_cmd(instance, cmd,
+ MEGASAS_BLOCKED_CMD_TIMEOUT);
+ else
+ ret = megasas_issue_polled(instance, cmd);
- megasas_return_cmd(instance, cmd);
+ if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+ megasas_return_mfi_mpt_pthr(instance, cmd,
+ cmd->mpt_pthr_cmd_blocked);
+ else
+ megasas_return_cmd(instance, cmd);
return ret;
}
@@ -807,7 +857,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
u32 size_sync_info, num_lds;
struct fusion_context *fusion;
struct MR_LD_TARGET_SYNC *ci = NULL;
- struct MR_FW_RAID_MAP_ALL *map;
+ struct MR_DRV_RAID_MAP_ALL *map;
struct MR_LD_RAID *raid;
struct MR_LD_TARGET_SYNC *ld_sync;
dma_addr_t ci_h = 0;
@@ -828,7 +878,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
return 1;
}
- map = fusion->ld_map[instance->map_id & 1];
+ map = fusion->ld_drv_map[instance->map_id & 1];
num_lds = le32_to_cpu(map->raidMap.ldCount);
@@ -840,7 +890,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
ci = (struct MR_LD_TARGET_SYNC *)
fusion->ld_map[(instance->map_id - 1) & 1];
- memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL));
+ memset(ci, 0, fusion->max_map_sz);
ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
@@ -852,8 +902,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
ld_sync->seqNum = raid->seqNum;
}
- size_map_info = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+ size_map_info = fusion->current_map_sz;
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
@@ -971,7 +1020,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
max_cmd = instance->max_fw_cmds;
- fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16;
+ fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
fusion->request_alloc_sz =
sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
@@ -988,8 +1037,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
fusion->max_sge_in_chain =
MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
- instance->max_num_sge = fusion->max_sge_in_main_msg +
- fusion->max_sge_in_chain - 2;
+ instance->max_num_sge = rounddown_pow_of_two(
+ fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2);
/* Used for pass thru MFI frame (DCMD) */
fusion->chain_offset_mfi_pthru =
@@ -1016,17 +1065,75 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
goto fail_ioc_init;
megasas_display_intel_branding(instance);
+ if (megasas_get_ctrl_info(instance, instance->ctrl_info)) {
+ dev_err(&instance->pdev->dev,
+ "Could not get controller info. Fail from %s %d\n",
+ __func__, __LINE__);
+ goto fail_ioc_init;
+ }
+
+ instance->supportmax256vd =
+ instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+ /* Below is additional check to address future FW enhancement */
+ if (instance->ctrl_info->max_lds > 64)
+ instance->supportmax256vd = 1;
+ instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
+ * MEGASAS_MAX_DEV_PER_CHANNEL;
+ instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
+ * MEGASAS_MAX_DEV_PER_CHANNEL;
+ if (instance->supportmax256vd) {
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ } else {
+ instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+ instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+ }
+ dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n"
+ "Driver supports %d VDs %d PDs\n",
+ instance->fw_supported_vd_count,
+ instance->fw_supported_pd_count,
+ instance->drv_supported_vd_count,
+ instance->drv_supported_pd_count);
instance->flag_ieee = 1;
+ fusion->fast_path_io = 0;
- fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) +
- (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+ fusion->old_map_sz =
+ sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->fw_supported_vd_count - 1));
+ fusion->new_map_sz =
+ sizeof(struct MR_FW_RAID_MAP_EXT);
+ fusion->drv_map_sz =
+ sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
+ (instance->drv_supported_vd_count - 1));
+
+ fusion->drv_map_pages = get_order(fusion->drv_map_sz);
+ for (i = 0; i < 2; i++) {
+ fusion->ld_map[i] = NULL;
+ fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
+ fusion->drv_map_pages);
+ if (!fusion->ld_drv_map[i]) {
+ dev_err(&instance->pdev->dev, "Could not allocate "
+ "memory for local map info for %d pages\n",
+ fusion->drv_map_pages);
+ if (i == 1)
+ free_pages((ulong)fusion->ld_drv_map[0],
+ fusion->drv_map_pages);
+ goto fail_ioc_init;
+ }
+ }
+
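/*
 * The driver-local copy (ld_drv_map[], allocated in the loop above) is never
 * handed to the controller, so it comes from plain pages via
 * __get_free_pages(); the firmware-visible map (ld_map[], allocated just
 * below) still uses dma_alloc_coherent().  drv_map_pages is simply
 * get_order(drv_map_sz).
 */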
+ fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz);
+
+ if (instance->supportmax256vd)
+ fusion->current_map_sz = fusion->new_map_sz;
+ else
+ fusion->current_map_sz = fusion->old_map_sz;
- fusion->fast_path_io = 0;
for (i = 0; i < 2; i++) {
fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
- fusion->map_sz,
+ fusion->max_map_sz,
&fusion->ld_map_phys[i],
GFP_KERNEL);
if (!fusion->ld_map[i]) {
@@ -1043,7 +1150,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
fail_map_info:
if (i == 1)
- dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
+ dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
fusion->ld_map[0], fusion->ld_map_phys[0]);
fail_ioc_init:
megasas_free_cmds_fusion(instance);
@@ -1065,6 +1172,11 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
u32 req_desc_hi,
struct megasas_register_set __iomem *regs)
{
+#if defined(writeq) && defined(CONFIG_64BIT)
+ u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo);
+
+ writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port);
+#else
unsigned long flags;
spin_lock_irqsave(&instance->hba_lock, flags);
@@ -1072,6 +1184,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
spin_unlock_irqrestore(&instance->hba_lock, flags);
+#endif
}
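/*
 * With a native 64-bit writeq the low and high halves of the request
 * descriptor reach the inbound queue port in one MMIO write, so no lock is
 * needed to keep them paired.  The 32-bit fallback in the #else branch above
 * keeps both writel() calls under hba_lock so descriptors posted from
 * different CPUs cannot interleave their halves.
 */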
/**
@@ -1224,7 +1337,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
void
megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
- struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
{
struct MR_LD_RAID *raid;
u32 ld;
@@ -1409,7 +1522,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
struct IO_REQUEST_INFO io_info;
struct fusion_context *fusion;
- struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
u8 *raidLUN;
device_id = MEGASAS_DEV_INDEX(instance, scp);
@@ -1486,10 +1599,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
io_info.isRead = 1;
- local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
- MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) {
+ instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
io_request->RaidContext.regLockFlags = 0;
fp_possible = 0;
} else {
@@ -1529,10 +1642,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
(io_info.isRead)) {
io_info.devHandle =
- get_updated_dev_handle(
+ get_updated_dev_handle(instance,
&fusion->load_balance_info[device_id],
&io_info);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+ cmd->pd_r1_lb = io_info.pd_after_lb;
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
@@ -1579,7 +1693,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
u32 device_id;
struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
u16 pd_index = 0;
- struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+ struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
struct fusion_context *fusion = instance->ctrl_context;
u8 span, physArm;
u16 devHandle;
@@ -1591,7 +1705,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(instance, scmd);
pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
+scmd->device->id;
- local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+ local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
@@ -1639,7 +1753,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
goto NonFastPath;
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
- if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+ if ((ld >= instance->fw_supported_vd_count) ||
+ (!fusion->fast_path_io))
goto NonFastPath;
raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -1864,10 +1979,11 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
struct megasas_cmd *cmd_mfi;
struct megasas_cmd_fusion *cmd_fusion;
u16 smid, num_completed;
- u8 reply_descript_type, arm;
+ u8 reply_descript_type;
u32 status, extStatus, device_id;
union desc_value d_val;
struct LD_LOAD_BALANCE_INFO *lbinfo;
+ int threshold_reply_count = 0;
fusion = instance->ctrl_context;
@@ -1914,10 +2030,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
lbinfo = &fusion->load_balance_info[device_id];
if (cmd_fusion->scmd->SCp.Status &
MEGASAS_LOAD_BALANCE_FLAG) {
- arm = lbinfo->raid1DevHandle[0] ==
- cmd_fusion->io_request->DevHandle ? 0 :
- 1;
- atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
+ atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
cmd_fusion->scmd->SCp.Status &=
~MEGASAS_LOAD_BALANCE_FLAG;
}
@@ -1941,10 +2054,19 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
break;
case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+
+ if (!cmd_mfi->mpt_pthr_cmd_blocked) {
+ if (megasas_dbg_lvl == 5)
+ dev_info(&instance->pdev->dev,
+ "freeing mfi/mpt pass-through "
+ "from %s %d\n",
+ __func__, __LINE__);
+ megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
+ cmd_fusion);
+ }
+
megasas_complete_cmd(instance, cmd_mfi, DID_OK);
cmd_fusion->flags = 0;
- megasas_return_cmd_fusion(instance, cmd_fusion);
-
break;
}
@@ -1955,6 +2077,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
desc->Words = ULLONG_MAX;
num_completed++;
+ threshold_reply_count++;
/* Get the next reply descriptor */
if (!fusion->last_reply_idx[MSIxIndex])
@@ -1974,6 +2097,25 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
break;
+ /*
+ * Write to the reply post host index register after completing a
+ * threshold number of replies while more replies are still pending
+ * in the reply queue
+ */
+ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+ if ((instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device ==
+ PCI_DEVICE_ID_LSI_FURY))
+ writel(((MSIxIndex & 0x7) << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[MSIxIndex/8]);
+ else
+ writel((MSIxIndex << 24) |
+ fusion->last_reply_idx[MSIxIndex],
+ instance->reply_post_host_index_addr[0]);
+ threshold_reply_count = 0;
+ }
}
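/*
 * Worked example of the threshold write above, with hypothetical values:
 * for MSI-X vector 11 and last_reply_idx = 42 on an Invader/Fury controller
 * the driver writes ((11 & 0x7) << 24) | 42 = 0x0300002a to
 * reply_post_host_index_addr[11 / 8], i.e. entry 1 of the array; on other
 * controllers it writes (11 << 24) | 42 to entry 0.
 */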
if (!num_completed)
@@ -2028,7 +2170,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
{
struct megasas_irq_context *irq_context = devp;
struct megasas_instance *instance = irq_context->instance;
- u32 mfiStatus, fw_state;
+ u32 mfiStatus, fw_state, dma_state;
if (instance->mask_interrupts)
return IRQ_NONE;
@@ -2050,7 +2192,16 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
/* If we didn't complete any commands, check for FW fault */
fw_state = instance->instancet->read_fw_status_reg(
instance->reg_set) & MFI_STATE_MASK;
- if (fw_state == MFI_STATE_FAULT) {
+ dma_state = instance->instancet->read_fw_status_reg
+ (instance->reg_set) & MFI_STATE_DMADONE;
+ if (instance->crash_dump_drv_support &&
+ instance->crash_dump_app_support) {
+ /* Start collecting the crash dump if the DMA-done bit is set */
+ if ((fw_state == MFI_STATE_FAULT) && dma_state)
+ schedule_work(&instance->crash_init);
+ else if (fw_state == MFI_STATE_FAULT)
+ schedule_work(&instance->work_init);
+ } else if (fw_state == MFI_STATE_FAULT) {
printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
"for scsi%d\n", instance->host->host_no);
schedule_work(&instance->work_init);
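/*
 * Summary of the crash dump handshake wired up above: when application-level
 * crash dump mode is enabled, firmware faults (MFI_STATE_FAULT) and raises
 * MFI_STATE_DMADONE in the scratch-pad status each time a 1 MB chunk has been
 * DMA'd into crash_dump_buf.  The ISR then schedules crash_init
 * (megasas_fusion_crash_dump_wq), which copies the chunk into a host buffer
 * and clears MFI_STATE_DMADONE so firmware can post the next chunk, until
 * firmware sets MFI_STATE_CRASH_DUMP_DONE.  Without application support the
 * fault falls through to the normal OCR work queue, as before.
 */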
@@ -2075,6 +2226,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
struct megasas_cmd_fusion *cmd;
struct fusion_context *fusion;
struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
+ u32 opcode;
cmd = megasas_get_cmd_fusion(instance);
if (!cmd)
@@ -2082,9 +2234,20 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
/* Save the smid. To be used for returning the cmd */
mfi_cmd->context.smid = cmd->index;
-
cmd->sync_cmd_idx = mfi_cmd->index;
+ /* Set this only for Blocked commands */
+ opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
+ if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+ && (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
+ mfi_cmd->is_wait_event = 1;
+
+ if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+ mfi_cmd->is_wait_event = 1;
+
+ if (mfi_cmd->is_wait_event)
+ mfi_cmd->mpt_pthr_cmd_blocked = cmd;
+
/*
* For cmds where the flag is set, store the flag and check
* on completion. For cmds with this flag, don't call
@@ -2173,6 +2336,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
return;
}
+ atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
instance->instancet->fire_cmd(instance, req_desc->u.low,
req_desc->u.high, instance->reg_set);
}
@@ -2203,6 +2367,49 @@ megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
}
/**
+ * megasas_alloc_host_crash_buffer - Allocate host buffers for FW crash dump collection
+ * @instance: Controller's soft instance
+ * The number of buffers successfully allocated is recorded in instance->drv_buf_alloc
+ */
+static void
+megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
+{
+ unsigned int i;
+
+ instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
+ for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
+ instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
+ instance->crash_buf_pages);
+ if (!instance->crash_buf[i]) {
+ dev_info(&instance->pdev->dev, "Firmware crash dump "
+ "memory allocation failed at index %d\n", i);
+ break;
+ }
+ }
+ instance->drv_buf_alloc = i;
+}
+
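/*
 * Sizing note, assuming a 4 KB page size: each host chunk matches the 1 MB
 * DMA crash buffer, so crash_buf_pages = get_order(1 MB) = 8 (2^8 pages of
 * 4 KB).  Up to MAX_CRASH_DUMP_SIZE such chunks may be staged; drv_buf_alloc
 * records how many were actually obtained, and an allocation failure simply
 * caps the amount of crash data that can be copied.
 */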
+/**
+ * megasas_free_host_crash_buffer - Free the host buffers used for FW crash dump collection
+ * @instance: Controller's soft instance
+ */
+void
+megasas_free_host_crash_buffer(struct megasas_instance *instance)
+{
+ unsigned int i;
+ for (i = 0; i < instance->drv_buf_alloc; i++) {
+ if (instance->crash_buf[i])
+ free_pages((ulong)instance->crash_buf[i],
+ instance->crash_buf_pages);
+ }
+ instance->drv_buf_index = 0;
+ instance->drv_buf_alloc = 0;
+ instance->fw_crash_state = UNAVAILABLE;
+ instance->fw_crash_buffer_size = 0;
+}
+
+/**
* megasas_adp_reset_fusion - For controller reset
* @regs: MFI register set
*/
@@ -2345,6 +2552,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
struct megasas_cmd *cmd_mfi;
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
u32 host_diag, abs_state, status_reg, reset_adapter;
+ u32 io_timeout_in_crash_mode = 0;
instance = (struct megasas_instance *)shost->hostdata;
fusion = instance->ctrl_context;
@@ -2355,8 +2563,45 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
"returning FAILED for scsi%d.\n",
instance->host->host_no);
+ mutex_unlock(&instance->reset_mutex);
return FAILED;
}
+ status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+ abs_state = status_reg & MFI_STATE_MASK;
+
+ /* IO timeout detected, forcibly put FW in FAULT state */
+ if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
+ instance->crash_dump_app_support && iotimeout) {
+ dev_info(&instance->pdev->dev, "IO timeout is detected, "
+ "forcibly FAULT Firmware\n");
+ instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+ status_reg = readl(&instance->reg_set->doorbell);
+ writel(status_reg | MFI_STATE_FORCE_OCR,
+ &instance->reg_set->doorbell);
+ readl(&instance->reg_set->doorbell);
+ mutex_unlock(&instance->reset_mutex);
+ do {
+ ssleep(3);
+ io_timeout_in_crash_mode++;
+ dev_dbg(&instance->pdev->dev, "waiting for [%d] "
+ "seconds for crash dump collection and OCR "
+ "to be done\n", (io_timeout_in_crash_mode * 3));
+ } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+ (io_timeout_in_crash_mode < 80));
+
+ if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+ dev_info(&instance->pdev->dev, "OCR done for IO "
+ "timeout case\n");
+ retval = SUCCESS;
+ } else {
+ dev_info(&instance->pdev->dev, "Controller is not "
+ "operational after 240 seconds wait for IO "
+ "timeout case in FW crash dump mode\n do "
+ "OCR/kill adapter\n");
+ retval = megasas_reset_fusion(shost, 0);
+ }
+ return retval;
+ }
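/*
 * The branch above turns an I/O timeout into a firmware fault by setting
 * MFI_STATE_FORCE_OCR in the doorbell, then polls adprecovery every 3
 * seconds for at most 80 iterations, i.e. the 240 second budget quoted in
 * the failure message, before falling back to a plain
 * megasas_reset_fusion(shost, 0) call (OCR or kill adapter).
 */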
if (instance->requestorId && !instance->skip_heartbeat_timer_del)
del_timer_sync(&instance->sriov_heartbeat_timer);
@@ -2563,10 +2808,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->frame->dcmd.opcode ==
cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
- megasas_return_cmd(instance,
- cmd_mfi);
- megasas_return_cmd_fusion(
- instance, cmd_fusion);
+ megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
} else {
req_desc =
megasas_get_request_descriptor(
@@ -2603,7 +2845,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
/* Reset load balance info */
memset(fusion->load_balance_info, 0,
sizeof(struct LD_LOAD_BALANCE_INFO)
- *MAX_LOGICAL_DRIVES);
+ *MAX_LOGICAL_DRIVES_EXT);
if (!megasas_get_map_info(instance))
megasas_sync_map_info(instance);
@@ -2623,6 +2865,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
printk(KERN_WARNING "megaraid_sas: Reset "
"successful for scsi%d.\n",
instance->host->host_no);
+
+ if (instance->crash_dump_drv_support) {
+ if (instance->crash_dump_app_support)
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_ON);
+ else
+ megasas_set_crash_dump_params(instance,
+ MR_CRASH_BUF_TURN_OFF);
+ }
retval = SUCCESS;
goto out;
}
@@ -2651,6 +2902,74 @@ out:
return retval;
}
+/* Fusion Crash dump collection work queue */
+void megasas_fusion_crash_dump_wq(struct work_struct *work)
+{
+ struct megasas_instance *instance =
+ container_of(work, struct megasas_instance, crash_init);
+ u32 status_reg;
+ u8 partial_copy = 0;
+
+
+ status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+
+ /*
+ * Allocate host crash buffers into which the 1 MB DMA crash buffer
+ * contents will be copied
+ */
+ if (instance->drv_buf_index == 0) {
+ /* Buffer is already allocated for old Crash dump.
+ * Do OCR and do not wait for crash dump collection
+ */
+ if (instance->drv_buf_alloc) {
+ dev_info(&instance->pdev->dev, "earlier crash dump is "
+ "not yet copied by application, ignoring this "
+ "crash dump and initiating OCR\n");
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ writel(status_reg,
+ &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ return;
+ }
+ megasas_alloc_host_crash_buffer(instance);
+ dev_info(&instance->pdev->dev, "Number of host crash buffers "
+ "allocated: %d\n", instance->drv_buf_alloc);
+ }
+
+ /*
+ * Driver has allocated max buffers, which can be allocated
+ * and FW has more crash dump data, then driver will
+ * ignore the data.
+ */
+ if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
+ dev_info(&instance->pdev->dev, "Driver is done copying "
+ "the buffer: %d\n", instance->drv_buf_alloc);
+ status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+ partial_copy = 1;
+ } else {
+ memcpy(instance->crash_buf[instance->drv_buf_index],
+ instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+ instance->drv_buf_index++;
+ status_reg &= ~MFI_STATE_DMADONE;
+ }
+
+ if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
+ dev_info(&instance->pdev->dev, "Crash Dump is available,number "
+ "of copied buffers: %d\n", instance->drv_buf_index);
+ instance->fw_crash_buffer_size = instance->drv_buf_index;
+ instance->fw_crash_state = AVAILABLE;
+ instance->drv_buf_index = 0;
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ if (!partial_copy)
+ megasas_reset_fusion(instance->host, 0);
+ } else {
+ writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+ readl(&instance->reg_set->outbound_scratch_pad);
+ }
+}
+
+
/* Fusion OCR work queue */
void megasas_fusion_ocr_wq(struct work_struct *work)
{
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index e76af5459a09..0d183d521bdd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -86,6 +86,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
+#define THRESHOLD_REPLY_COUNT 50
/*
* Raid Context structure which describes MegaRAID specific IO Parameters
@@ -478,10 +479,13 @@ struct MPI2_IOC_INIT_REQUEST {
#define MAX_ROW_SIZE 32
#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
#define MAX_LOGICAL_DRIVES 64
+#define MAX_LOGICAL_DRIVES_EXT 256
#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
#define MAX_ARRAYS 128
#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_ARRAYS_EXT 256
+#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
@@ -601,7 +605,6 @@ struct MR_FW_RAID_MAP {
u32 maxArrays;
} validationInfo;
u32 version[5];
- u32 reserved1[5];
};
u32 ldCount;
@@ -627,6 +630,8 @@ struct IO_REQUEST_INFO {
u8 start_span;
u8 reserved;
u64 start_row;
+ u8 span_arm; /* span[7:5], arm[4:0] */
+ u8 pd_after_lb;
};
struct MR_LD_TARGET_SYNC {
@@ -678,14 +683,14 @@ struct megasas_cmd_fusion {
u32 sync_cmd_idx;
u32 index;
u8 flags;
+ u8 pd_r1_lb;
};
struct LD_LOAD_BALANCE_INFO {
u8 loadBalanceFlag;
u8 reserved1;
- u16 raid1DevHandle[2];
- atomic_t scsi_pending_cmds[2];
- u64 last_accessed_block[2];
+ atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
+ u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
};
/* SPAN_SET is info calculated from span info from Raid map per LD */
@@ -713,11 +718,86 @@ struct MR_FW_RAID_MAP_ALL {
struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
} __attribute__ ((packed));
+struct MR_DRV_RAID_MAP {
+ /* total size of this structure, including this field.
+ * This field is set by the driver for the extended raid map;
+ * otherwise the value is taken from the firmware raid map.
+ */
+ u32 totalSize;
+
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ /* timeout value used by driver in FP IOs*/
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+
+ u16 ldCount;
+ u16 arCount;
+ u16 spanCount;
+ u16 reserve3;
+
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_LD_SPAN_MAP ldSpanMap[1];
+
+};
+
+/* The driver raid map is the same size as the extended raid map.
+ * MR_DRV_RAID_MAP_ALL mirrors the layout of the old raid map
+ * structures, mainly for code re-use.
+ */
+struct MR_DRV_RAID_MAP_ALL {
+
+ struct MR_DRV_RAID_MAP raidMap;
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+} __packed;
+
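/*
 * MR_DRV_RAID_MAP declares a single trailing ldSpanMap[1] entry and
 * MR_DRV_RAID_MAP_ALL above appends the remaining MAX_LOGICAL_DRIVES_EXT - 1
 * entries.  The matching allocation in the .c changes is
 * fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
 *     sizeof(struct MR_LD_SPAN_MAP) * (drv_supported_vd_count - 1),
 * where drv_supported_vd_count is expected to equal MAX_LOGICAL_DRIVES_EXT.
 */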
+
+
+struct MR_FW_RAID_MAP_EXT {
+ /* Not used in the new map */
+ u32 reserved;
+
+ union {
+ struct {
+ u32 maxLd;
+ u32 maxSpanDepth;
+ u32 maxRowSize;
+ u32 maxPdCount;
+ u32 maxArrays;
+ } validationInfo;
+ u32 version[5];
+ };
+
+ u8 fpPdIoTimeoutSec;
+ u8 reserved2[7];
+
+ u16 ldCount;
+ u16 arCount;
+ u16 spanCount;
+ u16 reserve3;
+
+ struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+ u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+ struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+ struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
+};
+
struct fusion_context {
struct megasas_cmd_fusion **cmd_list;
struct list_head cmd_pool;
- spinlock_t cmd_pool_lock;
+ spinlock_t mpt_pool_lock;
dma_addr_t req_frames_desc_phys;
u8 *req_frames_desc;
@@ -749,10 +829,18 @@ struct fusion_context {
struct MR_FW_RAID_MAP_ALL *ld_map[2];
dma_addr_t ld_map_phys[2];
- u32 map_sz;
+ /* Non dma-able memory. Driver local copy. */
+ struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
+
+ u32 max_map_sz;
+ u32 current_map_sz;
+ u32 old_map_sz;
+ u32 new_map_sz;
+ u32 drv_map_sz;
+ u32 drv_map_pages;
u8 fast_path_io;
- struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
- LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
+ struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+ LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
};
union desc_value {
@@ -763,4 +851,5 @@ union desc_value {
} u;
};
+
#endif /* _MEGARAID_SAS_FUSION_H_ */