author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-29 01:48:06 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-29 01:48:06 +0300
commit		938edb8a31b976c9a92eb0cd4ff481e93f76c1f1 (patch)
tree		0854d5f6859d51032f1d853eaa8ab0e8647fb0cb /drivers/scsi/smartpqi
parent		af7ddd8a627c62a835524b3f5b471edbbbcce025 (diff)
parent		da7903092b880b25971ca9103cb0b934a44ace2b (diff)
download	linux-938edb8a31b976c9a92eb0cd4ff481e93f76c1f1.tar.xz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This is mostly update of the usual drivers: smarpqi, lpfc, qedi,
megaraid_sas, libsas, zfcp, mpt3sas, hisi_sas.
Additionally, we have a pile of annotation, unused variable and minor
updates.
The big API change is the updates for Christoph's DMA rework which
include removing the DISABLE_CLUSTERING flag.
And finally there are a couple of target tree updates"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (259 commits)
scsi: isci: request: mark expected switch fall-through
scsi: isci: remote_node_context: mark expected switch fall-throughs
scsi: isci: remote_device: Mark expected switch fall-throughs
scsi: isci: phy: Mark expected switch fall-through
scsi: iscsi: Capture iscsi debug messages using tracepoints
scsi: myrb: Mark expected switch fall-throughs
scsi: megaraid: fix out-of-bound array accesses
scsi: mpt3sas: mpt3sas_scsih: Mark expected switch fall-through
scsi: fcoe: remove set but not used variable 'port'
scsi: smartpqi: call pqi_free_interrupts() in pqi_shutdown()
scsi: smartpqi: fix build warnings
scsi: smartpqi: update driver version
scsi: smartpqi: add ofa support
scsi: smartpqi: increase fw status register read timeout
scsi: smartpqi: bump driver version
scsi: smartpqi: add smp_utils support
scsi: smartpqi: correct lun reset issues
scsi: smartpqi: correct volume status
scsi: smartpqi: do not offline disks for transient did no connect conditions
scsi: smartpqi: allow for larger raid maps
...
Diffstat (limited to 'drivers/scsi/smartpqi')
-rw-r--r--	drivers/scsi/smartpqi/smartpqi.h		 216
-rw-r--r--	drivers/scsi/smartpqi/smartpqi_init.c		1558
-rw-r--r--	drivers/scsi/smartpqi/smartpqi_sas_transport.c	 164
-rw-r--r--	drivers/scsi/smartpqi/smartpqi_sis.c		  15
-rw-r--r--	drivers/scsi/smartpqi/smartpqi_sis.h		   1
5 files changed, 1748 insertions, 206 deletions
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index e97bf2670315..af962368818b 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -21,6 +21,9 @@
 #if !defined(_SMARTPQI_H)
 #define _SMARTPQI_H

+#include <scsi/scsi_host.h>
+#include <linux/bsg-lib.h>
+
 #pragma pack(1)

 #define PQI_DEVICE_SIGNATURE	"PQI DREG"
@@ -97,6 +100,12 @@ struct pqi_ctrl_registers {
 	struct pqi_device_registers pqi_registers;	/* 4000h */
 };

+#if ((HZ) < 1000)
+#define PQI_HZ	1000
+#else
+#define PQI_HZ	(HZ)
+#endif
+
 #define PQI_DEVICE_REGISTERS_OFFSET	0x4000

 enum pqi_io_path {
@@ -347,6 +356,10 @@ struct pqi_event_config {

 #define PQI_MAX_EVENT_DESCRIPTORS	255

+#define PQI_EVENT_OFA_MEMORY_ALLOCATION	0x0
+#define PQI_EVENT_OFA_QUIESCE		0x1
+#define PQI_EVENT_OFA_CANCELLED		0x2
+
 struct pqi_event_response {
 	struct pqi_iu_header header;
 	u8	event_type;
@@ -354,7 +367,17 @@ struct pqi_event_response {
 	u8	request_acknowlege : 1;
 	__le16	event_id;
 	__le32	additional_event_id;
-	u8	data[16];
+	union {
+		struct {
+			__le32	bytes_requested;
+			u8	reserved[12];
+		} ofa_memory_allocation;
+
+		struct {
+			__le16	reason;		/* reason for cancellation */
+			u8	reserved[14];
+		} ofa_cancelled;
+	} data;
 };

 struct pqi_event_acknowledge_request {
@@ -389,6 +412,54 @@ struct pqi_task_management_response {
 	u8	response_code;
 };

+struct pqi_vendor_general_request {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	__le16	function_code;
+	union {
+		struct {
+			__le16	first_section;
+			__le16	last_section;
+			u8	reserved[48];
+		} config_table_update;
+
+		struct {
+			__le64	buffer_address;
+			__le32	buffer_length;
+			u8	reserved[40];
+		} ofa_memory_allocation;
+	} data;
+};
+
+struct pqi_vendor_general_response {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	__le16	function_code;
+	__le16	status;
+	u8	reserved[2];
+};
+
+#define PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE	0
+#define PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE	1
+
+#define PQI_OFA_VERSION			1
+#define PQI_OFA_SIGNATURE		"OFA_QRM"
+#define PQI_OFA_MAX_SG_DESCRIPTORS	64
+
+#define PQI_OFA_MEMORY_DESCRIPTOR_LENGTH \
+	(offsetof(struct pqi_ofa_memory, sg_descriptor) + \
+	(PQI_OFA_MAX_SG_DESCRIPTORS * sizeof(struct pqi_sg_descriptor)))
+
+struct pqi_ofa_memory {
+	__le64	signature;	/* "OFA_QRM" */
+	__le16	version;	/* version of this struct(1 = 1st version) */
+	u8	reserved[62];
+	__le32	bytes_allocated;	/* total allocated memory in bytes */
+	__le16	num_memory_descriptors;
+	u8	reserved1[2];
+	struct pqi_sg_descriptor sg_descriptor[1];
+};
+
 struct pqi_aio_error_info {
 	u8	status;
 	u8	service_response;
@@ -419,6 +490,7 @@ struct pqi_raid_error_info {
 #define PQI_REQUEST_IU_GENERAL_ADMIN			0x60
 #define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG	0x72
 #define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG		0x73
+#define PQI_REQUEST_IU_VENDOR_GENERAL			0x75
 #define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT		0xf6

 #define PQI_RESPONSE_IU_GENERAL_MANAGEMENT		0x81
@@ -430,6 +502,7 @@ struct pqi_raid_error_info {
 #define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR		0xf3
 #define PQI_RESPONSE_IU_AIO_PATH_DISABLED		0xf4
 #define PQI_RESPONSE_IU_VENDOR_EVENT			0xf5
+#define PQI_RESPONSE_IU_VENDOR_GENERAL			0xf7

 #define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY	0x0
 #define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ			0x10
@@ -492,6 +565,7 @@ struct pqi_raid_error_info {
 #define PQI_EVENT_TYPE_HARDWARE			0x2
 #define PQI_EVENT_TYPE_PHYSICAL_DEVICE		0x4
 #define PQI_EVENT_TYPE_LOGICAL_DEVICE		0x5
+#define PQI_EVENT_TYPE_OFA			0xfb
 #define PQI_EVENT_TYPE_AIO_STATE_CHANGE		0xfd
 #define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE	0xfe
@@ -556,6 +630,7 @@ typedef u32 pqi_index_t;
 #define SOP_TASK_ATTRIBUTE_ACA		4

 #define SOP_TMF_COMPLETE		0x0
+#define SOP_TMF_REJECTED		0x4
 #define SOP_TMF_FUNCTION_SUCCEEDED	0x8

 /* additional CDB bytes usage field codes */
@@ -644,11 +719,13 @@ struct pqi_encryption_info {
 #define PQI_CONFIG_TABLE_MAX_LENGTH	((u16)~0)

 /* configuration table section IDs */
+#define PQI_CONFIG_TABLE_ALL_SECTIONS			(-1)
 #define PQI_CONFIG_TABLE_SECTION_GENERAL_INFO		0
 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES	1
 #define PQI_CONFIG_TABLE_SECTION_FIRMWARE_ERRATA	2
 #define PQI_CONFIG_TABLE_SECTION_DEBUG			3
 #define PQI_CONFIG_TABLE_SECTION_HEARTBEAT		4
+#define PQI_CONFIG_TABLE_SECTION_SOFT_RESET		5

 struct pqi_config_table {
 	u8	signature[8];		/* "CFGTABLE" */
@@ -680,6 +757,18 @@ struct pqi_config_table_general_info {
 					/* command */
 };

+struct pqi_config_table_firmware_features {
+	struct pqi_config_table_section_header header;
+	__le16	num_elements;
+	u8	features_supported[];
+/*	u8	features_requested_by_host[]; */
+/*	u8	features_enabled[]; */
+};
+
+#define PQI_FIRMWARE_FEATURE_OFA			0
+#define PQI_FIRMWARE_FEATURE_SMP			1
+#define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE	11
+
 struct pqi_config_table_debug {
 	struct pqi_config_table_section_header header;
 	__le32	scratchpad;
@@ -690,6 +779,22 @@ struct pqi_config_table_heartbeat {
 	__le32	heartbeat_counter;
 };

+struct pqi_config_table_soft_reset {
+	struct pqi_config_table_section_header header;
+	u8 soft_reset_status;
+};
+
+#define PQI_SOFT_RESET_INITIATE		0x1
+#define PQI_SOFT_RESET_ABORT		0x2
+
+enum pqi_soft_reset_status {
+	RESET_INITIATE_FIRMWARE,
+	RESET_INITIATE_DRIVER,
+	RESET_ABORT,
+	RESET_NORESPONSE,
+	RESET_TIMEDOUT
+};
+
 union pqi_reset_register {
 	struct {
 		u32	reset_type : 3;
@@ -808,8 +913,10 @@ struct pqi_scsi_dev {
 	u8	scsi3addr[8];
 	__be64	wwid;
 	u8	volume_id[16];
+	u8	unique_id[16];
 	u8	is_physical_device : 1;
 	u8	is_external_raid_device : 1;
+	u8	is_expander_smp_device : 1;
 	u8	target_lun_valid : 1;
 	u8	device_gone : 1;
 	u8	new_device : 1;
@@ -817,6 +924,7 @@ struct pqi_scsi_dev {
 	u8	volume_offline : 1;
 	bool	aio_enabled;		/* only valid for physical disks */
 	bool	in_reset;
+	bool	in_remove;
 	bool	device_offline;
 	u8	vendor[8];		/* bytes 8-15 of inquiry data */
 	u8	model[16];		/* bytes 16-31 of inquiry data */
@@ -854,6 +962,8 @@ struct pqi_scsi_dev {
 #define CISS_VPD_LV_DEVICE_GEOMETRY	0xc1	/* vendor-specific page */
 #define CISS_VPD_LV_BYPASS_STATUS	0xc2	/* vendor-specific page */
 #define CISS_VPD_LV_STATUS		0xc3	/* vendor-specific page */
+#define SCSI_VPD_HEADER_SZ		4
+#define SCSI_VPD_DEVICE_ID_IDX		8	/* Index of page id in page */

 #define VPD_PAGE	(1 << 8)

@@ -916,6 +1026,7 @@ struct pqi_sas_node {
 struct pqi_sas_port {
 	struct list_head port_list_entry;
 	u64	sas_address;
+	struct pqi_scsi_dev *device;
 	struct sas_port *port;
 	int	next_phy_index;
 	struct list_head phy_list_head;
@@ -947,13 +1058,15 @@ struct pqi_io_request {
 	struct list_head request_list_entry;
 };

-#define PQI_NUM_SUPPORTED_EVENTS	6
+#define PQI_NUM_SUPPORTED_EVENTS	7

 struct pqi_event {
 	bool	pending;
 	u8	event_type;
 	__le16	event_id;
 	__le32	additional_event_id;
+	__le32	ofa_bytes_requested;
+	__le16	ofa_cancel_reason;
 };

 #define PQI_RESERVED_IO_SLOTS_LUN_RESET			1
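The new PQI_HZ macro above clamps the timebase used for driver timeouts to at least 1000: every deadline written as N * PQI_HZ jiffies is therefore never shorter than N seconds, and on HZ < 1000 kernels it is deliberately longer. A minimal sketch of how such a deadline is consumed; demo_wait() and done() are hypothetical, not driver functions:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define DEMO_TIMEOUT_SECS 30

static int demo_wait(bool (*done)(void))
{
	/* 30 * PQI_HZ is at least 30000 jiffies; on an HZ=250 kernel
	 * that stretches the wall-clock wait rather than truncating it. */
	unsigned long timeout = (DEMO_TIMEOUT_SECS * PQI_HZ) + jiffies;

	while (!done()) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(1000, 2000);
	}
	return 0;
}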
@@ -1014,12 +1127,16 @@ struct pqi_ctrl_info {

 	struct mutex	scan_mutex;
 	struct mutex	lun_reset_mutex;
+	struct mutex	ofa_mutex; /* serialize ofa */
 	bool		controller_online;
 	bool		block_requests;
+	bool		in_shutdown;
+	bool		in_ofa;
 	u8		inbound_spanning_supported : 1;
 	u8		outbound_spanning_supported : 1;
 	u8		pqi_mode_enabled : 1;
 	u8		pqi_reset_quiesce_supported : 1;
+	u8		soft_reset_handshake_supported : 1;

 	struct list_head scsi_device_list;
 	spinlock_t	scsi_device_list_lock;
@@ -1040,6 +1157,7 @@ struct pqi_ctrl_info {
 	int		previous_num_interrupts;
 	u32		previous_heartbeat_count;
 	__le32 __iomem	*heartbeat_counter;
+	u8 __iomem	*soft_reset_status;
 	struct timer_list heartbeat_timer;
 	struct work_struct ctrl_offline_work;
@@ -1051,6 +1169,10 @@ struct pqi_ctrl_info {
 	struct list_head raid_bypass_retry_list;
 	spinlock_t	raid_bypass_retry_list_lock;
 	struct work_struct raid_bypass_retry_work;
+
+	struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
+	dma_addr_t	pqi_ofa_mem_dma_handle;
+	void		**pqi_ofa_chunk_virt_addr;
 };

 enum pqi_ctrl_mode {
@@ -1080,8 +1202,13 @@ enum pqi_ctrl_mode {
 #define BMIC_WRITE				0x27
 #define BMIC_SENSE_CONTROLLER_PARAMETERS	0x64
 #define BMIC_SENSE_SUBSYSTEM_INFORMATION	0x66
+#define BMIC_CSMI_PASSTHRU			0x68
 #define BMIC_WRITE_HOST_WELLNESS		0xa5
 #define BMIC_FLUSH_CACHE			0xc2
+#define BMIC_SET_DIAG_OPTIONS			0xf4
+#define BMIC_SENSE_DIAG_OPTIONS			0xf5
+
+#define CSMI_CC_SAS_SMP_PASSTHRU		0X17

 #define SA_FLUSH_CACHE				0x1

@@ -1109,6 +1236,10 @@ struct bmic_identify_controller {
 	u8	reserved3[32];
 };

+#define SA_EXPANDER_SMP_DEVICE	0x05
+/*SCSI Invalid Device Type for SAS devices*/
+#define PQI_SAS_SCSI_INVALID_DEVTYPE	0xff
+
 struct bmic_identify_physical_device {
 	u8	scsi_bus;	/* SCSI Bus number on controller */
 	u8	scsi_id;	/* SCSI ID on this bus */
@@ -1189,6 +1320,50 @@ struct bmic_identify_physical_device {
 	u8	padding_to_multiple_of_512[9];
 };

+struct bmic_smp_request {
+	u8	frame_type;
+	u8	function;
+	u8	allocated_response_length;
+	u8	request_length;
+	u8	additional_request_bytes[1016];
+};
+
+struct bmic_smp_response {
+	u8	frame_type;
+	u8	function;
+	u8	function_result;
+	u8	response_length;
+	u8	additional_response_bytes[1016];
+};
+
+struct bmic_csmi_ioctl_header {
+	__le32	header_length;
+	u8	signature[8];
+	__le32	timeout;
+	__le32	control_code;
+	__le32	return_code;
+	__le32	length;
+};
+
+struct bmic_csmi_smp_passthru {
+	u8	phy_identifier;
+	u8	port_identifier;
+	u8	connection_rate;
+	u8	reserved;
+	__be64	destination_sas_address;
+	__le32	request_length;
+	struct bmic_smp_request request;
+	u8	connection_status;
+	u8	reserved1[3];
+	__le32	response_length;
+	struct bmic_smp_response response;
+};
+
+struct bmic_csmi_smp_passthru_buffer {
+	struct bmic_csmi_ioctl_header ioctl_header;
+	struct bmic_csmi_smp_passthru parameters;
+};
+
 struct bmic_flush_cache {
 	u8	disable_flag;
 	u8	system_power_action;
@@ -1206,8 +1381,42 @@ enum bmic_flush_cache_shutdown_event {
 	RESTART = 4
 };

+struct bmic_diag_options {
+	__le32 options;
+};
+
 #pragma pack()

+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
+{
+	void *hostdata = shost_priv(shost);
+
+	return *((struct pqi_ctrl_info **)hostdata);
+}
+
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+	return !ctrl_info->controller_online;
+}
+
+static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
+{
+	atomic_inc(&ctrl_info->num_busy_threads);
+}
+
+static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
+{
+	atomic_dec(&ctrl_info->num_busy_threads);
+}
+
+static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->block_requests;
+}
+
+void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
+	struct sas_rphy *rphy);
+
 int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
 void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
 int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
@@ -1216,6 +1425,9 @@ void pqi_remove_sas_device(struct pqi_scsi_dev *device);
 struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
 	struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
 void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd);
+int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info);

 extern struct sas_function_template pqi_sas_transport_functions;

diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index bac084260d80..e2fa3f476227 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -40,11 +40,11 @@
 #define BUILD_TIMESTAMP
 #endif

-#define DRIVER_VERSION		"1.1.4-130"
+#define DRIVER_VERSION		"1.2.4-070"
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		1
+#define DRIVER_MINOR		2
 #define DRIVER_RELEASE		4
-#define DRIVER_REVISION		130
+#define DRIVER_REVISION		70

 #define DRIVER_NAME		"Microsemi PQI Driver (v" \
 				DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -74,6 +74,15 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
 	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
 	unsigned int cdb_length, struct pqi_queue_group *queue_group,
 	struct pqi_encryption_info *encryption_info, bool raid_bypass);
+static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
+static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
+	u32 bytes_requested);
+static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
+static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
+static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, unsigned long timeout_secs);

 /* for flags argument to pqi_submit_raid_request_synchronous() */
 #define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
@@ -113,6 +122,7 @@ static unsigned int pqi_supported_event_types[] = {
 	PQI_EVENT_TYPE_HARDWARE,
 	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
 	PQI_EVENT_TYPE_LOGICAL_DEVICE,
+	PQI_EVENT_TYPE_OFA,
 	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
 	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
 };
@@ -176,16 +186,14 @@ static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
 	scmd->scsi_done(scmd);
 }

-static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
+static inline void pqi_disable_write_same(struct scsi_device *sdev)
 {
-	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
+	sdev->no_write_same = 1;
 }

-static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
+static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
 {
-	void *hostdata = shost_priv(shost);
-
-	return *((struct pqi_ctrl_info **)hostdata);
+	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
 }

 static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
@@ -198,11 +206,6 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
 	return scsi3addr[2] != 0;
 }

-static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
-{
-	return !ctrl_info->controller_online;
-}
-
 static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
 {
 	if (ctrl_info->controller_online)
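Several small inlines, shost_to_hba() among them, move from smartpqi_init.c into the header so that the new SAS transport code can use them. shost_to_hba() works because the driver keeps exactly one pointer in the Scsi_Host private area. A minimal sketch of that pattern with hypothetical demo_* names:

#include <scsi/scsi_host.h>

struct demo_ctrl_info { int ctrl_id; };

/* At host-allocation time, reserve room for one pointer and stash the
 * controller context there. */
static struct Scsi_Host *demo_alloc_host(struct scsi_host_template *tmpl,
					 struct demo_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(tmpl, sizeof(struct demo_ctrl_info *));
	if (!shost)
		return NULL;
	*(struct demo_ctrl_info **)shost_priv(shost) = ctrl_info;
	return shost;
}

/* Every later callback can then recover it, exactly as shost_to_hba()
 * does in the header above. */
static struct demo_ctrl_info *demo_shost_to_hba(struct Scsi_Host *shost)
{
	return *(struct demo_ctrl_info **)shost_priv(shost);
}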
@@ -241,11 +244,6 @@ static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
 	scsi_unblock_requests(ctrl_info->scsi_host);
 }

-static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
-{
-	return ctrl_info->block_requests;
-}
-
 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
 	unsigned long timeout_msecs)
 {
@@ -275,16 +273,6 @@ static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
 	return remaining_msecs;
 }

-static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
-{
-	atomic_inc(&ctrl_info->num_busy_threads);
-}
-
-static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
-{
-	atomic_dec(&ctrl_info->num_busy_threads);
-}
-
 static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
 {
 	while (atomic_read(&ctrl_info->num_busy_threads) >
@@ -312,11 +300,39 @@ static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
 	return device->in_reset;
 }

+static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_ofa = true;
+}
+
+static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_ofa = false;
+}
+
+static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->in_ofa;
+}
+
+static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
+{
+	device->in_remove = true;
+}
+
+static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
+					struct pqi_scsi_dev *device)
+{
+	return device->in_remove & !ctrl_info->in_shutdown;
+}
+
 static inline void pqi_schedule_rescan_worker_with_delay(
 	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
 {
 	if (pqi_ctrl_offline(ctrl_info))
 		return;
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		return;

 	schedule_delayed_work(&ctrl_info->rescan_work, delay);
 }
@@ -326,7 +342,7 @@ static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
 	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
 }

-#define PQI_RESCAN_WORK_DELAY	(10 * HZ)
+#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

 static inline void pqi_schedule_rescan_worker_delayed(
 	struct pqi_ctrl_info *ctrl_info)
@@ -347,6 +363,27 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
 	return readl(ctrl_info->heartbeat_counter);
 }

+static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
+{
+	if (!ctrl_info->soft_reset_status)
+		return 0;
+
+	return readb(ctrl_info->soft_reset_status);
+}
+
+static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
+	u8 clear)
+{
+	u8 status;
+
+	if (!ctrl_info->soft_reset_status)
+		return;
+
+	status = pqi_read_soft_reset_status(ctrl_info);
+	status &= ~clear;
+	writeb(status, ctrl_info->soft_reset_status);
+}
+
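One readability note on pqi_device_in_remove() above: it combines two bool values with the bitwise `&`. Because C bools are always 0 or 1 the result is correct, but the logical operator is the conventional spelling and is harder to misread; an equivalent form for comparison (demo name hypothetical):

/* Equivalent, more conventional spelling of the test above; the
 * bitwise form is valid for bool operands but easy to mistake for
 * a typo. */
static inline bool demo_device_in_remove(struct pqi_ctrl_info *ctrl_info,
					 struct pqi_scsi_dev *device)
{
	return device->in_remove && !ctrl_info->in_shutdown;
}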
 static int pqi_map_single(struct pci_dev *pci_dev,
 	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
 	size_t buffer_length, enum dma_data_direction data_direction)
@@ -390,6 +427,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 	u16 vpd_page, enum dma_data_direction *dir)
 {
 	u8 *cdb;
+	size_t cdb_length = buffer_length;

 	memset(request, 0, sizeof(*request));

@@ -412,7 +450,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 			cdb[1] = 0x1;
 			cdb[2] = (u8)vpd_page;
 		}
-		cdb[4] = (u8)buffer_length;
+		cdb[4] = (u8)cdb_length;
 		break;
 	case CISS_REPORT_LOG:
 	case CISS_REPORT_PHYS:
@@ -422,32 +460,45 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
 			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
 		else
 			cdb[1] = CISS_REPORT_LOG_EXTENDED;
-		put_unaligned_be32(buffer_length, &cdb[6]);
+		put_unaligned_be32(cdb_length, &cdb[6]);
 		break;
 	case CISS_GET_RAID_MAP:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = CISS_READ;
 		cdb[1] = CISS_GET_RAID_MAP;
-		put_unaligned_be32(buffer_length, &cdb[6]);
+		put_unaligned_be32(cdb_length, &cdb[6]);
 		break;
 	case SA_FLUSH_CACHE:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
 		cdb[6] = BMIC_FLUSH_CACHE;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
+	case BMIC_SENSE_DIAG_OPTIONS:
+		cdb_length = 0;
+		/* fall through */
 	case BMIC_IDENTIFY_CONTROLLER:
 	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
 		request->data_direction = SOP_READ_FLAG;
 		cdb[0] = BMIC_READ;
 		cdb[6] = cmd;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
+	case BMIC_SET_DIAG_OPTIONS:
+		cdb_length = 0;
+		/* fall through */
 	case BMIC_WRITE_HOST_WELLNESS:
 		request->data_direction = SOP_WRITE_FLAG;
 		cdb[0] = BMIC_WRITE;
 		cdb[6] = cmd;
-		put_unaligned_be16(buffer_length, &cdb[7]);
+		put_unaligned_be16(cdb_length, &cdb[7]);
+		break;
+	case BMIC_CSMI_PASSTHRU:
+		request->data_direction = SOP_BIDIRECTIONAL;
+		cdb[0] = BMIC_WRITE;
+		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
+		cdb[6] = cmd;
+		put_unaligned_be16(cdb_length, &cdb[7]);
 		break;
 	default:
 		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
@@ -509,43 +560,130 @@ static void pqi_free_io_request(struct pqi_io_request *io_request)
 	atomic_dec(&io_request->refcount);
 }

-static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
-	struct bmic_identify_controller *buffer)
+static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
+	struct pqi_raid_error_info *error_info,
+	unsigned long timeout_msecs)
 {
 	int rc;
 	enum dma_data_direction dir;
 	struct pqi_raid_path_request request;

 	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
-		sizeof(*buffer), 0, &dir);
+		cmd, scsi3addr, buffer,
+		buffer_length, vpd_page, &dir);
 	if (rc)
 		return rc;

-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, error_info, timeout_msecs);

 	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 	return rc;
 }

-static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
+/* Helper functions for pqi_send_scsi_raid_request */
+
+static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
+	u8 cmd, void *buffer, size_t buffer_length)
+{
+	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
+}
+
+static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
+	u8 cmd, void *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info)
+{
+	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
+		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
+}
+
+static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_identify_controller *buffer)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
+		buffer, sizeof(*buffer));
+}
+
+static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
 	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
 {
+	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
+		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
+}
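The consolidation above replaces many open-coded build/submit/unmap sequences with one pqi_send_scsi_raid_request() plus thin wrappers. A hypothetical caller inside the driver, showing how a one-off BMIC read now reduces to a single call (demo_identify() is illustrative only):

static int demo_identify(struct pqi_ctrl_info *ctrl_info)
{
	struct bmic_identify_controller *id;
	int rc;

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	/* wraps pqi_send_ctrl_raid_request(), which in turn handles
	 * request building, synchronous submission, and DMA unmap */
	rc = pqi_identify_controller(ctrl_info, id);
	if (rc == 0)
		dev_info(&ctrl_info->pci_dev->dev, "identify OK\n");

	kfree(id);
	return rc;
}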
+static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
+	u8 *scsi3addr, u16 vpd_page)
+{
 	int rc;
-	enum dma_data_direction dir;
-	struct pqi_raid_path_request request;
+	int i;
+	int pages;
+	unsigned char *buf, bufsize;

-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
-		&dir);
-	if (rc)
-		return rc;
+	buf = kzalloc(256, GFP_KERNEL);
+	if (!buf)
+		return false;

-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
+	/* Get the size of the page list first */
+	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
+		buf, SCSI_VPD_HEADER_SZ);
+	if (rc != 0)
+		goto exit_unsupported;
+
+	pages = buf[3];
+	if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
+		bufsize = pages + SCSI_VPD_HEADER_SZ;
+	else
+		bufsize = 255;
+
+	/* Get the whole VPD page list */
+	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
+		buf, bufsize);
+	if (rc != 0)
+		goto exit_unsupported;
+
+	pages = buf[3];
+	for (i = 1; i <= pages; i++)
+		if (buf[3 + i] == vpd_page)
+			goto exit_supported;
+
+exit_unsupported:
+	kfree(buf);
+	return false;
+
+exit_supported:
+	kfree(buf);
+	return true;
+}
+
+static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
+	u8 *scsi3addr, u8 *device_id, int buflen)
+{
+	int rc;
+	unsigned char *buf;
+
+	if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
+		return 1; /* function not supported */
+
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
+		VPD_PAGE | SCSI_VPD_DEVICE_ID,
+		buf, 64);
+	if (rc == 0) {
+		if (buflen > 16)
+			buflen = 16;
+		memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
+	}
+
+	kfree(buf);

-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 	return rc;
 }

@@ -580,9 +718,7 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
 	enum bmic_flush_cache_shutdown_event shutdown_event)
 {
 	int rc;
-	struct pqi_raid_path_request request;
 	struct bmic_flush_cache *flush_cache;
-	enum dma_data_direction dir;

 	/*
 	 * Don't bother trying to flush the cache if the controller is
@@ -597,42 +733,55 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,

 	flush_cache->shutdown_event = shutdown_event;

-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
-		sizeof(*flush_cache), 0, &dir);
-	if (rc)
-		goto out;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-		0, NULL, NO_TIMEOUT);
+	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
+		sizeof(*flush_cache));

-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
-out:
 	kfree(flush_cache);

 	return rc;
 }

-static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
-	void *buffer, size_t buffer_length)
+int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
+	struct pqi_raid_error_info *error_info)
+{
+	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
+		buffer, buffer_length, error_info);
+}
+
+#define PQI_FETCH_PTRAID_DATA (1UL<<31)
+
+static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
-	struct pqi_raid_path_request request;
-	enum dma_data_direction dir;
+	struct bmic_diag_options *diag;

-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
-		buffer_length, 0, &dir);
+	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
+	if (!diag)
+		return -ENOMEM;
+
+	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
+		diag, sizeof(*diag));
 	if (rc)
-		return rc;
+		goto out;

-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
-		0, NULL, NO_TIMEOUT);
+	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);
+
+	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS,
+		diag, sizeof(*diag));
+out:
+	kfree(diag);

-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
 	return rc;
 }

+static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
+	void *buffer, size_t buffer_length)
+{
+	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
+		buffer, buffer_length);
+}
+
 #pragma pack(1)

 struct bmic_host_wellness_driver_version {
@@ -640,6 +789,7 @@ struct bmic_host_wellness_driver_version {
 	u8	driver_version_tag[2];
 	__le16	driver_version_length;
 	char	driver_version[32];
+	u8	dont_write_tag[2];
 	u8	end_tag[2];
 };

@@ -669,6 +819,8 @@ static int pqi_write_driver_version_to_host_wellness(
 	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
 		sizeof(buffer->driver_version) - 1);
 	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+	buffer->dont_write_tag[0] = 'D';
+	buffer->dont_write_tag[1] = 'W';
 	buffer->end_tag[0] = 'Z';
 	buffer->end_tag[1] = 'Z';

@@ -742,7 +894,7 @@ static int pqi_write_current_time_to_host_wellness(
 	return rc;
 }

-#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)
+#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

 static void pqi_update_time_worker(struct work_struct *work)
 {
@@ -776,23 +928,11 @@ static inline void pqi_cancel_update_time_worker(
 	cancel_delayed_work_sync(&ctrl_info->update_time_work);
 }

-static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
 	void *buffer, size_t buffer_length)
 {
-	int rc;
-	enum dma_data_direction dir;
-	struct pqi_raid_path_request request;
-
-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
-	if (rc)
-		return rc;
-
-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
-
-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
-	return rc;
+	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
+		buffer_length);
 }

 static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
@@ -1010,8 +1150,6 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
 	char *err_msg;
 	u32 raid_map_size;
 	u32 r5or6_blocks_per_row;
-	unsigned int num_phys_disks;
-	unsigned int num_raid_map_entries;

 	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

@@ -1020,22 +1158,6 @@ static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
 		goto bad_raid_map;
 	}

-	if (raid_map_size > sizeof(*raid_map)) {
-		err_msg = "RAID map too large";
-		goto bad_raid_map;
-	}
-
-	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
-		(get_unaligned_le16(&raid_map->data_disks_per_row) +
-		get_unaligned_le16(&raid_map->metadata_disks_per_row));
-	num_raid_map_entries = num_phys_disks *
-		get_unaligned_le16(&raid_map->row_cnt);
-
-	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
-		err_msg = "invalid number of map entries in RAID map";
-		goto bad_raid_map;
-	}
-
 	if (device->raid_level == SA_RAID_1) {
 		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
 			err_msg = "invalid RAID-1 map";
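pqi_vpd_page_supported() above walks the standard INQUIRY "Supported VPD Pages" (0x00) response: byte 3 is the length of the page list and the page codes themselves start at byte 4, which is why the loop indexes buf[3 + i]. A hypothetical stand-alone checker over a raw response buffer, shown only to make that layout explicit:

/* resp points at a Supported-VPD-Pages INQUIRY response;
 * SCSI_VPD_HEADER_SZ (4) header bytes precede the page-code list. */
static bool demo_vpd_page_listed(const unsigned char *resp,
				 unsigned char page)
{
	int i;
	int pages = resp[3];	/* page-list length from the header */

	for (i = 1; i <= pages; i++)
		if (resp[3 + i] == page)
			return true;
	return false;
}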
@@ -1074,27 +1196,45 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
 	int rc;
-	enum dma_data_direction dir;
-	struct pqi_raid_path_request request;
+	u32 raid_map_size;
 	struct raid_map *raid_map;

 	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
 	if (!raid_map)
 		return -ENOMEM;

-	rc = pqi_build_raid_path_request(ctrl_info, &request,
-		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
-		sizeof(*raid_map), 0, &dir);
+	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+		device->scsi3addr, raid_map, sizeof(*raid_map),
+		0, NULL, NO_TIMEOUT);
+
 	if (rc)
 		goto error;

-	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
-		NULL, NO_TIMEOUT);
+	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

-	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
+	if (raid_map_size > sizeof(*raid_map)) {

-	if (rc)
-		goto error;
+		kfree(raid_map);
+
+		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
+		if (!raid_map)
+			return -ENOMEM;
+
+		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
+			device->scsi3addr, raid_map, raid_map_size,
+			0, NULL, NO_TIMEOUT);
+		if (rc)
+			goto error;
+
+		if (get_unaligned_le32(&raid_map->structure_size)
+			!= raid_map_size) {
+			dev_warn(&ctrl_info->pci_dev->dev,
+				"Requested %d bytes, received %d bytes",
+				raid_map_size,
+				get_unaligned_le32(&raid_map->structure_size));
+			goto error;
+		}
+	}

 	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
 	if (rc)
@@ -1165,6 +1305,9 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
 	if (rc)
 		goto out;

+	if (vpd->page_code != CISS_VPD_LV_STATUS)
+		goto out;
+
 	page_length = offsetof(struct ciss_vpd_logical_volume_status,
 		volume_status) + vpd->page_length;
 	if (page_length < sizeof(*vpd))
@@ -1190,6 +1333,9 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
 	u8 *buffer;
 	unsigned int retries;

+	if (device->is_expander_smp_device)
+		return 0;
+
 	buffer = kmalloc(64, GFP_KERNEL);
 	if (!buffer)
 		return -ENOMEM;
@@ -1225,6 +1371,14 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
 		}
 	}

+	if (pqi_get_device_id(ctrl_info, device->scsi3addr,
+		device->unique_id, sizeof(device->unique_id)) < 0)
+		dev_warn(&ctrl_info->pci_dev->dev,
+			"Can't get device id for scsi %d:%d:%d:%d\n",
+			ctrl_info->scsi_host->host_no,
+			device->bus, device->target,
+			device->lun);
+
 out:
 	kfree(buffer);

@@ -1387,9 +1541,24 @@ static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
 	return rc;
 }

+#define PQI_PENDING_IO_TIMEOUT_SECS	20
+
 static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
+	int rc;
+
+	pqi_device_remove_start(device);
+
+	rc = pqi_device_wait_for_pending_io(ctrl_info, device,
+		PQI_PENDING_IO_TIMEOUT_SECS);
+	if (rc)
+		dev_err(&ctrl_info->pci_dev->dev,
+			"scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
+			ctrl_info->scsi_host->host_no, device->bus,
+			device->target, device->lun,
+			atomic_read(&device->scsi_cmds_outstanding));
+
 	if (pqi_is_logical_device(device))
 		scsi_remove_device(device->sdev);
 	else
@@ -1454,6 +1623,14 @@ static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
 	return DEVICE_NOT_FOUND;
 }

+static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
+{
+	if (device->is_expander_smp_device)
+		return "Enclosure SMP ";
+
+	return scsi_device_type(device->devtype);
+}
+
 #define PQI_DEV_INFO_BUFFER_LENGTH	128

 static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
@@ -1489,7 +1666,7 @@ static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
 	count += snprintf(buffer + count,
 		PQI_DEV_INFO_BUFFER_LENGTH - count,
 		" %s %.8s %.16s ",
-		scsi_device_type(device->devtype),
+		pqi_device_type(device),
 		device->vendor,
 		device->model);

@@ -1534,6 +1711,8 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
 	existing_device->is_physical_device = new_device->is_physical_device;
 	existing_device->is_external_raid_device =
 		new_device->is_external_raid_device;
+	existing_device->is_expander_smp_device =
+		new_device->is_expander_smp_device;
 	existing_device->aio_enabled = new_device->aio_enabled;
 	memcpy(existing_device->vendor, new_device->vendor,
 		sizeof(existing_device->vendor));
@@ -1558,6 +1737,7 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
 		new_device->raid_bypass_configured;
 	existing_device->raid_bypass_enabled =
 		new_device->raid_bypass_enabled;
+	existing_device->device_offline = false;

 	/* To prevent this from being freed later. */
 	new_device->raid_map = NULL;
@@ -1589,6 +1769,14 @@ static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
 	device->keep_device = false;
 }

+static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
+{
+	if (device->is_expander_smp_device)
+		return device->sas_port != NULL;
+
+	return device->sdev != NULL;
+}
+
 static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
 {
@@ -1674,6 +1862,9 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,

 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

+	if (pqi_ctrl_in_ofa(ctrl_info))
+		pqi_ctrl_ofa_done(ctrl_info);
+
 	/* Remove all devices that have gone away. */
 	list_for_each_entry_safe(device, next, &delete_list,
 		delete_list_entry) {
@@ -1683,7 +1874,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 		} else {
 			pqi_dev_info(ctrl_info, "removed", device);
 		}
-		if (device->sdev)
+		if (pqi_is_device_added(device))
 			pqi_remove_device(ctrl_info, device);
 		list_del(&device->delete_list_entry);
 		pqi_free_device(device);
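pqi_remove_device() now drains outstanding commands before tearing the device down, bounded by PQI_PENDING_IO_TIMEOUT_SECS. A condensed sketch of that wait over the per-device atomic counter (demo name hypothetical, logic as in the diff):

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

#define DEMO_PENDING_IO_TIMEOUT_SECS 20

static int demo_wait_for_pending_io(atomic_t *cmds_outstanding)
{
	unsigned long timeout =
		(DEMO_PENDING_IO_TIMEOUT_SECS * PQI_HZ) + jiffies;

	while (atomic_read(cmds_outstanding)) {
		/* on timeout the removal still proceeds; the driver
		 * just logs the number of commands left in flight */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(1000, 2000);
	}
	return 0;
}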
@@ -1705,7 +1896,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,

 	/* Expose any new devices. */
 	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
-		if (!device->sdev) {
+		if (!pqi_is_device_added(device)) {
 			pqi_dev_info(ctrl_info, "added", device);
 			rc = pqi_add_device(ctrl_info, device);
 			if (rc) {
@@ -1722,7 +1913,12 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,

 static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
 {
-	bool is_supported = false;
+	bool is_supported;
+
+	if (device->is_expander_smp_device)
+		return true;
+
+	is_supported = false;

 	switch (device->devtype) {
 	case TYPE_DISK:
@@ -1756,6 +1952,30 @@ static inline bool pqi_skip_device(u8 *scsi3addr)
 	return false;
 }

+static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
+{
+	if (!device->is_physical_device)
+		return false;
+
+	if (device->is_expander_smp_device)
+		return true;
+
+	switch (device->devtype) {
+	case TYPE_DISK:
+	case TYPE_ZBC:
+	case TYPE_ENCLOSURE:
+		return true;
+	}
+
+	return false;
+}
+
+static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+{
+	return !device->is_physical_device ||
+		!pqi_skip_device(device->scsi3addr);
+}
+
 static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 {
 	int i;
@@ -1864,9 +2084,14 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
 		device->is_physical_device = is_physical_device;
-		if (!is_physical_device)
+		if (is_physical_device) {
+			if (phys_lun_ext_entry->device_type ==
+				SA_EXPANDER_SMP_DEVICE)
+				device->is_expander_smp_device = true;
+		} else {
 			device->is_external_raid_device =
 				pqi_is_external_raid_addr(scsi3addr);
+		}

 		/* Gather information about the device. */
 		rc = pqi_get_device_info(ctrl_info, device);
@@ -1899,30 +2124,23 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 			device->wwid = phys_lun_ext_entry->wwid;
 			if ((phys_lun_ext_entry->device_flags &
 				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
-				phys_lun_ext_entry->aio_handle)
+				phys_lun_ext_entry->aio_handle) {
 				device->aio_enabled = true;
+				device->aio_handle =
+					phys_lun_ext_entry->aio_handle;
+			}
+			if (device->devtype == TYPE_DISK ||
+				device->devtype == TYPE_ZBC) {
+				pqi_get_physical_disk_info(ctrl_info,
+					device, id_phys);
+			}
 		} else {
 			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
 				sizeof(device->volume_id));
 		}

-		switch (device->devtype) {
-		case TYPE_DISK:
-		case TYPE_ZBC:
-		case TYPE_ENCLOSURE:
-			if (device->is_physical_device) {
-				device->sas_address =
-					get_unaligned_be64(&device->wwid);
-				if (device->devtype == TYPE_DISK ||
-					device->devtype == TYPE_ZBC) {
-					device->aio_handle =
-						phys_lun_ext_entry->aio_handle;
-					pqi_get_physical_disk_info(ctrl_info,
-						device, id_phys);
-				}
-			}
-			break;
-		}
+		if (pqi_is_device_with_sas_address(device))
+			device->sas_address = get_unaligned_be64(&device->wwid);

 		new_device_list[num_valid_devices++] = device;
 	}
@@ -1965,7 +2183,7 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
 		if (!device)
 			break;

-		if (device->sdev)
+		if (pqi_is_device_added(device))
 			pqi_remove_device(ctrl_info, device);
 		pqi_free_device(device);
 	}
@@ -1991,7 +2209,13 @@ static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)

 static void pqi_scan_start(struct Scsi_Host *shost)
 {
-	pqi_scan_scsi_devices(shost_to_hba(shost));
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = shost_to_hba(shost);
+	if (pqi_ctrl_in_ofa(ctrl_info))
+		return;
+
+	pqi_scan_scsi_devices(ctrl_info);
 }

 /* Returns TRUE if scan is finished. */
@@ -2018,6 +2242,12 @@ static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
 	mutex_unlock(&ctrl_info->lun_reset_mutex);
 }

+static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
+{
+	mutex_lock(&ctrl_info->ofa_mutex);
+	mutex_unlock(&ctrl_info->ofa_mutex);
+}
+
 static inline void pqi_set_encryption_info(
 	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
 	u64 first_block)
@@ -2325,9 +2555,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
 			(map_row * total_disks_per_row) + first_column;
 	}

-	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
-		return PQI_RAID_BYPASS_INELIGIBLE;
-
 	aio_handle = raid_map->disk_data[map_index].aio_handle;
 	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
 		first_row * strip_size +
@@ -2397,7 +2624,7 @@ static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
 	u8 status;

 	pqi_registers = ctrl_info->pqi_registers;
-	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;

 	while (1) {
 		signature = readq(&pqi_registers->signature);
@@ -2458,10 +2685,9 @@ static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
 		return;

 	device->device_offline = true;
-	scsi_device_set_state(sdev, SDEV_OFFLINE);
 	ctrl_info = shost_to_hba(sdev->host);
 	pqi_schedule_rescan_worker(ctrl_info);
-	dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
+	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
 		path, ctrl_info->scsi_host->host_no, device->bus,
 		device->target, device->lun);
 }
@@ -2665,6 +2891,9 @@ static int pqi_interpret_task_management_response(
 	case SOP_TMF_FUNCTION_SUCCEEDED:
 		rc = 0;
 		break;
+	case SOP_TMF_REJECTED:
+		rc = -EAGAIN;
+		break;
 	default:
 		rc = -EIO;
 		break;
@@ -2704,8 +2933,17 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
 		switch (response->header.iu_type) {
 		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
 		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+			if (io_request->scmd)
+				io_request->scmd->result = 0;
+			/* fall through */
 		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
 			break;
+		case PQI_RESPONSE_IU_VENDOR_GENERAL:
+			io_request->status =
+				get_unaligned_le16(
+				&((struct pqi_vendor_general_response *)
+					response)->status);
+			break;
 		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
 			io_request->status =
 				pqi_interpret_task_management_response(
@@ -2825,6 +3063,111 @@ static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
 	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
 }

+#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS		30
+#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS	1
+
+static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned long timeout;
+	u8 status;
+
+	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;
+
+	while (1) {
+		status = pqi_read_soft_reset_status(ctrl_info);
+		if (status & PQI_SOFT_RESET_INITIATE)
+			return RESET_INITIATE_DRIVER;
+
+		if (status & PQI_SOFT_RESET_ABORT)
+			return RESET_ABORT;
+
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for soft reset status\n");
+			return RESET_TIMEDOUT;
+		}
+
+		if (!sis_is_firmware_running(ctrl_info))
+			return RESET_NORESPONSE;
+
+		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
+	}
+}
+
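The poll above is an instance of a common MMIO handshake: read a status byte, act on flag bits, bail out if the firmware dies, and bound the whole wait. A generic sketch of that shape (demo_* names hypothetical; the driver's version also checks firmware liveness between polls):

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int demo_poll_flags(u8 __iomem *status_reg, u8 go_bit, u8 abort_bit,
			   unsigned long timeout_jiffies)
{
	unsigned long deadline = jiffies + timeout_jiffies;
	u8 status;

	for (;;) {
		status = readb(status_reg);
		if (status & go_bit)
			return 1;	/* host should perform the reset */
		if (status & abort_bit)
			return 0;	/* firmware aborted the activation */
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		ssleep(1);		/* coarse 1 s poll, as in the driver */
	}
}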
+static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
+	enum pqi_soft_reset_status reset_status)
+{
+	int rc;
+
+	switch (reset_status) {
+	case RESET_INITIATE_DRIVER:
+		/* fall through */
+	case RESET_TIMEDOUT:
+		dev_info(&ctrl_info->pci_dev->dev,
+			"resetting controller %u\n", ctrl_info->ctrl_id);
+		sis_soft_reset(ctrl_info);
+		/* fall through */
+	case RESET_INITIATE_FIRMWARE:
+		rc = pqi_ofa_ctrl_restart(ctrl_info);
+		pqi_ofa_free_host_buffer(ctrl_info);
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Online Firmware Activation for controller %u: %s\n",
+			ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
+		break;
+	case RESET_ABORT:
+		pqi_ofa_ctrl_unquiesce(ctrl_info);
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Online Firmware Activation for controller %u: %s\n",
+			ctrl_info->ctrl_id, "ABORTED");
+		break;
+	case RESET_NORESPONSE:
+		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_take_ctrl_offline(ctrl_info);
+		break;
+	}
+}
+
+static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_event *event)
+{
+	u16 event_id;
+	enum pqi_soft_reset_status status;
+
+	event_id = get_unaligned_le16(&event->event_id);
+
+	mutex_lock(&ctrl_info->ofa_mutex);
+
+	if (event_id == PQI_EVENT_OFA_QUIESCE) {
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Received Online Firmware Activation quiesce event for controller %u\n",
+			ctrl_info->ctrl_id);
+		pqi_ofa_ctrl_quiesce(ctrl_info);
+		pqi_acknowledge_event(ctrl_info, event);
+		if (ctrl_info->soft_reset_handshake_supported) {
+			status = pqi_poll_for_soft_reset_status(ctrl_info);
+			pqi_process_soft_reset(ctrl_info, status);
+		} else {
+			pqi_process_soft_reset(ctrl_info,
+					RESET_INITIATE_FIRMWARE);
+		}
+
+	} else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
+		pqi_acknowledge_event(ctrl_info, event);
+		pqi_ofa_setup_host_buffer(ctrl_info,
+			le32_to_cpu(event->ofa_bytes_requested));
+		pqi_ofa_host_memory_update(ctrl_info);
+	} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
+		pqi_ofa_free_host_buffer(ctrl_info);
+		pqi_acknowledge_event(ctrl_info, event);
+		dev_info(&ctrl_info->pci_dev->dev,
+			"Online Firmware Activation(%u) cancel reason : %u\n",
+			ctrl_info->ctrl_id, event->ofa_cancel_reason);
+	}
+
+	mutex_unlock(&ctrl_info->ofa_mutex);
+}
+
 static void pqi_event_worker(struct work_struct *work)
 {
 	unsigned int i;
@@ -2844,6 +3187,11 @@ static void pqi_event_worker(struct work_struct *work)
 	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
 		if (event->pending) {
 			event->pending = false;
+			if (event->event_type == PQI_EVENT_TYPE_OFA) {
+				pqi_ctrl_unbusy(ctrl_info);
+				pqi_ofa_process_event(ctrl_info, event);
+				return;
+			}
 			pqi_acknowledge_event(ctrl_info, event);
 		}
 		event++;
@@ -2853,7 +3201,7 @@ out:
 	pqi_ctrl_unbusy(ctrl_info);
 }

-#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * HZ)
+#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * PQI_HZ)

 static void pqi_heartbeat_timer_handler(struct timer_list *t)
 {
@@ -2922,6 +3270,24 @@ static inline bool pqi_is_supported_event(unsigned int event_type)
 	return pqi_event_type_to_event_index(event_type) != -1;
 }

+static void pqi_ofa_capture_event_payload(struct pqi_event *event,
+	struct pqi_event_response *response)
+{
+	u16 event_id;
+
+	event_id = get_unaligned_le16(&event->event_id);
+
+	if (event->event_type == PQI_EVENT_TYPE_OFA) {
+		if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
+			event->ofa_bytes_requested =
+			response->data.ofa_memory_allocation.bytes_requested;
+		} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
+			event->ofa_cancel_reason =
+				response->data.ofa_cancelled.reason;
+		}
+	}
+}
+
 static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
 {
 	unsigned int num_events;
@@ -2956,6 +3322,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
 			event->event_id = response->event_id;
 			event->additional_event_id =
 				response->additional_event_id;
+			pqi_ofa_capture_event_payload(event, response);
 		}
 	}
@@ -3389,7 +3756,7 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
 	return 0;
 }

-#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		PQI_HZ
 #define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1

 static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
@@ -3482,7 +3849,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
 	admin_queues = &ctrl_info->admin_queues;
 	oq_ci = admin_queues->oq_ci_copy;

-	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies;
+	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;

 	while (1) {
 		oq_pi = readl(admin_queues->oq_pi);
@@ -3597,7 +3964,7 @@ static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,

 	while (1) {
 		if (wait_for_completion_io_timeout(wait,
-			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
+			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
 			rc = 0;
 			break;
 		}
@@ -4927,7 +5294,17 @@ void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
 {
 	struct pqi_scsi_dev *device;

+	if (!scmd->device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		return;
+	}
+
 	device = scmd->device->hostdata;
+	if (!device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		return;
+	}
+
 	atomic_dec(&device->scsi_cmds_outstanding);
 }

@@ -4944,16 +5321,24 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
 	device = scmd->device->hostdata;
 	ctrl_info = shost_to_hba(shost);

+	if (!device) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		pqi_scsi_done(scmd);
+		return 0;
+	}
+
 	atomic_inc(&device->scsi_cmds_outstanding);

-	if (pqi_ctrl_offline(ctrl_info)) {
+	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(ctrl_info,
+								device)) {
 		set_host_byte(scmd, DID_NO_CONNECT);
 		pqi_scsi_done(scmd);
 		return 0;
 	}

 	pqi_ctrl_busy(ctrl_info);
-	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
+	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
+	    pqi_ctrl_in_ofa(ctrl_info)) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -5098,25 +5483,75 @@ static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
 	}
 }

+static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	unsigned int path;
+	struct pqi_queue_group *queue_group;
+	unsigned long flags;
+	struct pqi_io_request *io_request;
+	struct pqi_io_request *next;
+	struct scsi_cmnd *scmd;
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		queue_group = &ctrl_info->queue_groups[i];
+
+		for (path = 0; path < 2; path++) {
+			spin_lock_irqsave(&queue_group->submit_lock[path],
+						flags);
+
+			list_for_each_entry_safe(io_request, next,
+				&queue_group->request_list[path],
+				request_list_entry) {
+
+				scmd = io_request->scmd;
+				if (!scmd)
+					continue;
+
+				list_del(&io_request->request_list_entry);
+				set_host_byte(scmd, DID_RESET);
+				pqi_scsi_done(scmd);
+			}
+
+			spin_unlock_irqrestore(
+				&queue_group->submit_lock[path], flags);
+		}
+	}
+}
+
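The drain above walks each request list under its submit lock; the _safe list iterator is what makes the list_del() inside the loop legal. A minimal generic sketch of that pattern (demo_* names hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req { struct list_head entry; };

static void demo_drain(struct list_head *list, spinlock_t *lock)
{
	struct demo_req *req, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(req, next, list, entry) {
		list_del(&req->entry);	/* safe: 'next' already captured */
		/* complete or fail the request here */
	}
	spin_unlock_irqrestore(lock, flags);
}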
 static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
-	struct pqi_scsi_dev *device)
+	struct pqi_scsi_dev *device, unsigned long timeout_secs)
 {
+	unsigned long timeout;
+
+	timeout = (timeout_secs * PQI_HZ) + jiffies;
+
 	while (atomic_read(&device->scsi_cmds_outstanding)) {
 		pqi_check_ctrl_health(ctrl_info);
 		if (pqi_ctrl_offline(ctrl_info))
 			return -ENXIO;
+		if (timeout_secs != NO_TIMEOUT) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(&ctrl_info->pci_dev->dev,
+					"timed out waiting for pending IO\n");
+				return -ETIMEDOUT;
+			}
+		}
 		usleep_range(1000, 2000);
 	}

 	return 0;
 }

-static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
+static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
+	unsigned long timeout_secs)
 {
 	bool io_pending;
 	unsigned long flags;
+	unsigned long timeout;
 	struct pqi_scsi_dev *device;

+	timeout = (timeout_secs * PQI_HZ) + jiffies;
 	while (1) {
 		io_pending = false;
@@ -5138,6 +5573,13 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
 		if (pqi_ctrl_offline(ctrl_info))
 			return -ENXIO;

+		if (timeout_secs != NO_TIMEOUT) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(&ctrl_info->pci_dev->dev,
+					"timed out waiting for pending IO\n");
+				return -ETIMEDOUT;
+			}
+		}
 		usleep_range(1000, 2000);
 	}

@@ -5161,7 +5603,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,

 	while (1) {
 		if (wait_for_completion_io_timeout(wait,
-			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
+			PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) {
 			rc = 0;
 			break;
 		}
@@ -5212,20 +5654,56 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
 	return rc;
 }

+#define PQI_LUN_RESET_RETRIES			3
+#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS	10000
 /* Performs a reset at the LUN level. */

-static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device)
 {
 	int rc;
+	unsigned int retries;
+	unsigned long timeout_secs;

-	rc = pqi_lun_reset(ctrl_info, device);
-	if (rc == 0)
-		rc = pqi_device_wait_for_pending_io(ctrl_info, device);
+	for (retries = 0;;) {
+		rc = pqi_lun_reset(ctrl_info, device);
+		if (rc != -EAGAIN ||
+		    ++retries > PQI_LUN_RESET_RETRIES)
+			break;
+		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
+	}
+	timeout_secs = rc ? PQI_LUN_RESET_TIMEOUT_SECS : NO_TIMEOUT;
+
+	rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);

 	return rc == 0 ? SUCCESS : FAILED;
 }

+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+
+	mutex_lock(&ctrl_info->lun_reset_mutex);
+
+	pqi_ctrl_block_requests(ctrl_info);
+	pqi_ctrl_wait_until_quiesced(ctrl_info);
+	pqi_fail_io_queued_for_device(ctrl_info, device);
+	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
+	pqi_device_reset_start(device);
+	pqi_ctrl_unblock_requests(ctrl_info);
+
+	if (rc)
+		rc = FAILED;
+	else
+		rc = _pqi_device_reset(ctrl_info, device);
+
+	pqi_device_reset_done(device);
+
+	mutex_unlock(&ctrl_info->lun_reset_mutex);
+	return rc;
+}
+
 static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
 {
 	int rc;
@@ -5243,28 +5721,16 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
 	pqi_check_ctrl_health(ctrl_info);
 	if (pqi_ctrl_offline(ctrl_info)) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"controller %u offlined - cannot send device reset\n",
+			ctrl_info->ctrl_id);
 		rc = FAILED;
 		goto out;
 	}

-	mutex_lock(&ctrl_info->lun_reset_mutex);
-
-	pqi_ctrl_block_requests(ctrl_info);
-	pqi_ctrl_wait_until_quiesced(ctrl_info);
-	pqi_fail_io_queued_for_device(ctrl_info, device);
-	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
-	pqi_device_reset_start(device);
-	pqi_ctrl_unblock_requests(ctrl_info);
-
-	if (rc)
-		rc = FAILED;
-	else
-		rc = pqi_device_reset(ctrl_info, device);
-
-	pqi_device_reset_done(device);
-
-	mutex_unlock(&ctrl_info->lun_reset_mutex);
+	pqi_wait_until_ofa_finished(ctrl_info);
+	rc = pqi_device_reset(ctrl_info, device);
 out:
 	dev_err(&ctrl_info->pci_dev->dev,
 		"reset of scsi %d:%d:%d:%d: %s\n",
@@ -5308,6 +5774,10 @@ static int pqi_slave_alloc(struct scsi_device *sdev)
 			scsi_change_queue_depth(sdev,
 				device->advertised_queue_depth);
 		}
+		if (pqi_is_logical_device(device))
+			pqi_disable_write_same(sdev);
+		else
+			sdev->allow_restart = 1;
 	}

 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
@@ -5580,6 +6050,9 @@ static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)

 	ctrl_info = shost_to_hba(sdev->host);

+	if (pqi_ctrl_in_ofa(ctrl_info))
+		return -EBUSY;
+
 	switch (cmd) {
 	case CCISS_DEREGDISK:
 	case CCISS_REGNEWDISK:
@@ -5684,6 +6157,150 @@ static struct device_attribute *pqi_shost_attrs[] = {
 	NULL
 };

+static ssize_t pqi_unique_id_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	unsigned char uid[16];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+			flags);
+		return -ENODEV;
+	}
+	memcpy(uid, device->unique_id, sizeof(uid));
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return snprintf(buffer, PAGE_SIZE,
+		"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+		uid[0], uid[1], uid[2], uid[3],
+		uid[4], uid[5], uid[6], uid[7],
+		uid[8], uid[9], uid[10], uid[11],
+		uid[12], uid[13], uid[14], uid[15]);
+}
+
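The new sysfs show() handlers all follow the same discipline: take scsi_device_list_lock, snapshot the fields they need into a local buffer, drop the lock, and only then format the output, so snprintf() never runs with the spinlock held. A generic sketch of that shape (demo name hypothetical, fixed 16-byte payload for simplicity):

static ssize_t demo_attr_show(spinlock_t *lock, const u8 *src, char *buffer)
{
	u8 copy[16];
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	memcpy(copy, src, sizeof(copy));  /* snapshot while device can't go away */
	spin_unlock_irqrestore(lock, flags);

	/* %16phN: 16 bytes as hex with no separators */
	return snprintf(buffer, PAGE_SIZE, "%16phN\n", copy);
}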
+static ssize_t pqi_lunid_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u8 lunid[8];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+			flags);
+		return -ENODEV;
+	}
+	memcpy(lunid, device->scsi3addr, sizeof(lunid));
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
+}
+
+#define MAX_PATHS 8
+static ssize_t pqi_path_info_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	int i;
+	int output_len = 0;
+	u8 box;
+	u8 bay;
+	u8 path_map_index = 0;
+	char *active;
+	unsigned char phys_connector[2];
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (!device) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+			flags);
+		return -ENODEV;
+	}
+
+	bay = device->bay;
+	for (i = 0; i < MAX_PATHS; i++) {
+		path_map_index = 1<<i;
+		if (i == device->active_path_index)
+			active = "Active";
+		else if (device->path_map & path_map_index)
+			active = "Inactive";
+		else
+			continue;
+
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"[%d:%d:%d:%d] %20.20s ",
+					ctrl_info->scsi_host->host_no,
+					device->bus, device->target,
+					device->lun,
+					scsi_device_type(device->devtype));
+
+		if (device->devtype == TYPE_RAID ||
+			pqi_is_logical_device(device))
+			goto end_buffer;
+
+		memcpy(&phys_connector, &device->phys_connector[i],
+			sizeof(phys_connector));
+		if (phys_connector[0] < '0')
+			phys_connector[0] = '0';
+		if (phys_connector[1] < '0')
+			phys_connector[1] = '0';
+
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"PORT: %.2s ", phys_connector);
+
+		box = device->box[i];
+		if (box != 0 && box != 0xFF)
+			output_len += scnprintf(buf + output_len,
+						PAGE_SIZE - output_len,
+						"BOX: %hhu ", box);
+
+		if ((device->devtype == TYPE_DISK ||
+			device->devtype == TYPE_ZBC) &&
+			pqi_expose_device(device))
+			output_len += scnprintf(buf + output_len,
+						PAGE_SIZE - output_len,
+						"BAY: %hhu ", bay);
+
+end_buffer:
+		output_len += scnprintf(buf + output_len,
+					PAGE_SIZE - output_len,
+					"%s\n", active);
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+	return output_len;
+}
+
 static ssize_t pqi_sas_address_show(struct device *dev,
 	struct device_attribute *attr, char *buffer)
 {
@@ -5760,12 +6377,18 @@ static ssize_t pqi_raid_level_show(struct device *dev,
 	return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
 }

+static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
+static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
+static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
 static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
 static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
 	pqi_ssd_smart_path_enabled_show, NULL);
 static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);

 static struct device_attribute *pqi_sdev_attrs[] = {
+	&dev_attr_lunid,
+	&dev_attr_unique_id,
+	&dev_attr_path_info,
 	&dev_attr_sas_address,
 	&dev_attr_ssd_smart_path_enabled,
 	&dev_attr_raid_level,
@@ -5780,7 +6403,6 @@ static struct scsi_host_template pqi_driver_template = {
 	.scan_start = pqi_scan_start,
 	.scan_finished = pqi_scan_finished,
 	.this_id = -1,
-	.use_clustering = ENABLE_CLUSTERING,
 	.eh_device_reset_handler = pqi_eh_device_reset_handler,
 	.ioctl = pqi_ioctl,
 	.slave_alloc = pqi_slave_alloc,
@@ -5948,6 +6570,244 @@ out:
 	return rc;
 }

+struct pqi_config_table_section_info {
+	struct pqi_ctrl_info *ctrl_info;
+	void		*section;
+	u32		section_offset;
+	void __iomem	*section_iomem_addr;
+};
+
+static inline bool pqi_is_firmware_feature_supported(
+	struct pqi_config_table_firmware_features *firmware_features,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+
+	byte_index = bit_position / BITS_PER_BYTE;
+
+	if (byte_index >= le16_to_cpu(firmware_features->num_elements))
+		return false;
+
+	return firmware_features->features_supported[byte_index] &
+		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline bool pqi_is_firmware_feature_enabled(
+	struct pqi_config_table_firmware_features *firmware_features,
+	void __iomem *firmware_features_iomem_addr,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+	u8 __iomem *features_enabled_iomem_addr;
+
+	byte_index = (bit_position / BITS_PER_BYTE) +
+		(le16_to_cpu(firmware_features->num_elements) * 2);
+
+	features_enabled_iomem_addr = firmware_features_iomem_addr +
+		offsetof(struct pqi_config_table_firmware_features,
+			features_supported) + byte_index;
+
+	return *((__force u8 *)features_enabled_iomem_addr) &
+		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
+}
+
+static inline void pqi_request_firmware_feature(
+	struct pqi_config_table_firmware_features *firmware_features,
+	unsigned int bit_position)
+{
+	unsigned int byte_index;
+
+	byte_index = (bit_position / BITS_PER_BYTE) +
+		le16_to_cpu(firmware_features->num_elements);
+
+	firmware_features->features_supported[byte_index] |=
+		(1 << (bit_position % BITS_PER_BYTE));
+}
+
+static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
+	u16 first_section, u16 last_section)
+{
+	struct pqi_vendor_general_request request;
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+		&request.header.iu_length);
+	put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
+		&request.function_code);
+	put_unaligned_le16(first_section,
+		&request.data.config_table_update.first_section);
+	put_unaligned_le16(last_section,
+		&request.data.config_table_update.last_section);
+
+	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+}
+
+static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_config_table_firmware_features *firmware_features,
+	void __iomem *firmware_features_iomem_addr)
+{
+	void *features_requested;
+	void __iomem *features_requested_iomem_addr;
+
+	features_requested = firmware_features->features_supported +
+		le16_to_cpu(firmware_features->num_elements);
+
+	features_requested_iomem_addr = firmware_features_iomem_addr +
+		(features_requested - (void *)firmware_features);
+
+	memcpy_toio(features_requested_iomem_addr, features_requested,
+		le16_to_cpu(firmware_features->num_elements));
+
+	return pqi_config_table_update(ctrl_info,
+		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
+		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
+}
+
+struct pqi_firmware_feature {
+	char		*feature_name;
+	unsigned int	feature_bit;
+	bool		supported;
+	bool		enabled;
+	void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
+		struct pqi_firmware_feature *firmware_feature);
+};
+
firmware_feature->feature_name); + return; + } + + if (firmware_feature->enabled) { + dev_info(&ctrl_info->pci_dev->dev, + "%s enabled\n", firmware_feature->feature_name); + return; + } + + dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n", + firmware_feature->feature_name); +} + +static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, + struct pqi_firmware_feature *firmware_feature) +{ + if (firmware_feature->feature_status) + firmware_feature->feature_status(ctrl_info, firmware_feature); +} + +static DEFINE_MUTEX(pqi_firmware_features_mutex); + +static struct pqi_firmware_feature pqi_firmware_features[] = { + { + .feature_name = "Online Firmware Activation", + .feature_bit = PQI_FIRMWARE_FEATURE_OFA, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "Serial Management Protocol", + .feature_bit = PQI_FIRMWARE_FEATURE_SMP, + .feature_status = pqi_firmware_feature_status, + }, + { + .feature_name = "New Soft Reset Handshake", + .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, + .feature_status = pqi_firmware_feature_status, + }, +}; + +static void pqi_process_firmware_features( + struct pqi_config_table_section_info *section_info) +{ + int rc; + struct pqi_ctrl_info *ctrl_info; + struct pqi_config_table_firmware_features *firmware_features; + void __iomem *firmware_features_iomem_addr; + unsigned int i; + unsigned int num_features_supported; + + ctrl_info = section_info->ctrl_info; + firmware_features = section_info->section; + firmware_features_iomem_addr = section_info->section_iomem_addr; + + for (i = 0, num_features_supported = 0; + i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (pqi_is_firmware_feature_supported(firmware_features, + pqi_firmware_features[i].feature_bit)) { + pqi_firmware_features[i].supported = true; + num_features_supported++; + } else { + pqi_firmware_feature_update(ctrl_info, + &pqi_firmware_features[i]); + } + } + + if (num_features_supported == 0) + return; + + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; + pqi_request_firmware_feature(firmware_features, + pqi_firmware_features[i].feature_bit); + } + + rc = pqi_enable_firmware_features(ctrl_info, firmware_features, + firmware_features_iomem_addr); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "failed to enable firmware features in PQI configuration table\n"); + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; + pqi_firmware_feature_update(ctrl_info, + &pqi_firmware_features[i]); + } + return; + } + + ctrl_info->soft_reset_handshake_supported = false; + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + if (!pqi_firmware_features[i].supported) + continue; + if (pqi_is_firmware_feature_enabled(firmware_features, + firmware_features_iomem_addr, + pqi_firmware_features[i].feature_bit)) { + pqi_firmware_features[i].enabled = true; + if (pqi_firmware_features[i].feature_bit == + PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE) + ctrl_info->soft_reset_handshake_supported = + true; + } + pqi_firmware_feature_update(ctrl_info, + &pqi_firmware_features[i]); + } +} + +static void pqi_init_firmware_features(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { + pqi_firmware_features[i].supported = false; + pqi_firmware_features[i].enabled = false; + } +} + +static void pqi_process_firmware_features_section( + struct pqi_config_table_section_info *section_info) +{ + 
mutex_lock(&pqi_firmware_features_mutex); + pqi_init_firmware_features(); + pqi_process_firmware_features(section_info); + mutex_unlock(&pqi_firmware_features_mutex); +} + static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) { u32 table_length; @@ -5955,8 +6815,11 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) void __iomem *table_iomem_addr; struct pqi_config_table *config_table; struct pqi_config_table_section_header *section; + struct pqi_config_table_section_info section_info; table_length = ctrl_info->config_table_length; + if (table_length == 0) + return 0; config_table = kmalloc(table_length, GFP_KERNEL); if (!config_table) { @@ -5973,13 +6836,22 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) ctrl_info->config_table_offset; memcpy_fromio(config_table, table_iomem_addr, table_length); + section_info.ctrl_info = ctrl_info; section_offset = get_unaligned_le32(&config_table->first_section_offset); while (section_offset) { section = (void *)config_table + section_offset; + section_info.section = section; + section_info.section_offset = section_offset; + section_info.section_iomem_addr = + table_iomem_addr + section_offset; + switch (get_unaligned_le16(&section->section_id)) { + case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES: + pqi_process_firmware_features_section(&section_info); + break; case PQI_CONFIG_TABLE_SECTION_HEARTBEAT: if (pqi_disable_heartbeat) dev_warn(&ctrl_info->pci_dev->dev, @@ -5992,6 +6864,13 @@ static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info) struct pqi_config_table_heartbeat, heartbeat_counter); break; + case PQI_CONFIG_TABLE_SECTION_SOFT_RESET: + ctrl_info->soft_reset_status = + table_iomem_addr + + section_offset + + offsetof(struct pqi_config_table_soft_reset, + soft_reset_status); + break; } section_offset = @@ -6124,10 +7003,6 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->pqi_mode_enabled = true; pqi_save_ctrl_mode(ctrl_info, PQI_MODE); - rc = pqi_process_config_table(ctrl_info); - if (rc) - return rc; - rc = pqi_alloc_admin_queues(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -6189,6 +7064,11 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); ctrl_info->controller_online = true; + + rc = pqi_process_config_table(ctrl_info); + if (rc) + return rc; + pqi_start_heartbeat_timer(ctrl_info); rc = pqi_enable_events(ctrl_info); @@ -6210,6 +7090,13 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) return rc; } + rc = pqi_set_diag_rescan(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error enabling multi-lun rescan\n"); + return rc; + } + rc = pqi_write_driver_version_to_host_wellness(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -6267,6 +7154,24 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) return rc; /* + * Get the controller properties. This allows us to determine + * whether or not it supports PQI mode. + */ + rc = sis_get_ctrl_properties(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining controller properties\n"); + return rc; + } + + rc = sis_get_pqi_capabilities(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining controller capabilities\n"); + return rc; + } + + /* * If the function we are about to call succeeds, the * controller will transition from legacy SIS mode * into PQI mode. 
@@ -6306,9 +7211,14 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX); ctrl_info->controller_online = true; - pqi_start_heartbeat_timer(ctrl_info); pqi_ctrl_unblock_requests(ctrl_info); + rc = pqi_process_config_table(ctrl_info); + if (rc) + return rc; + + pqi_start_heartbeat_timer(ctrl_info); + rc = pqi_enable_events(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -6316,6 +7226,20 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) return rc; } + rc = pqi_get_ctrl_firmware_version(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining firmware version\n"); + return rc; + } + + rc = pqi_set_diag_rescan(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error enabling multi-lun rescan\n"); + return rc; + } + rc = pqi_write_driver_version_to_host_wellness(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, @@ -6426,6 +7350,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) mutex_init(&ctrl_info->scan_mutex); mutex_init(&ctrl_info->lun_reset_mutex); + mutex_init(&ctrl_info->ofa_mutex); INIT_LIST_HEAD(&ctrl_info->scsi_device_list); spin_lock_init(&ctrl_info->scsi_device_list_lock); @@ -6502,6 +7427,217 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info) pqi_free_ctrl_resources(ctrl_info); } +static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info) +{ + pqi_cancel_update_time_worker(ctrl_info); + pqi_cancel_rescan_worker(ctrl_info); + pqi_wait_until_lun_reset_finished(ctrl_info); + pqi_wait_until_scan_finished(ctrl_info); + pqi_ctrl_ofa_start(ctrl_info); + pqi_ctrl_block_requests(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS); + pqi_fail_io_queued_for_all_devices(ctrl_info); + pqi_wait_until_inbound_queues_empty(ctrl_info); + pqi_stop_heartbeat_timer(ctrl_info); + ctrl_info->pqi_mode_enabled = false; + pqi_save_ctrl_mode(ctrl_info, SIS_MODE); +} + +static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info) +{ + pqi_ofa_free_host_buffer(ctrl_info); + ctrl_info->pqi_mode_enabled = true; + pqi_save_ctrl_mode(ctrl_info, PQI_MODE); + ctrl_info->controller_online = true; + pqi_ctrl_unblock_requests(ctrl_info); + pqi_start_heartbeat_timer(ctrl_info); + pqi_schedule_update_time_worker(ctrl_info); + pqi_clear_soft_reset_status(ctrl_info, + PQI_SOFT_RESET_ABORT); + pqi_scan_scsi_devices(ctrl_info); +} + +static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, + u32 total_size, u32 chunk_size) +{ + u32 sg_count; + u32 size; + int i; + struct pqi_sg_descriptor *mem_descriptor = NULL; + struct device *dev; + struct pqi_ofa_memory *ofap; + + dev = &ctrl_info->pci_dev->dev; + + sg_count = (total_size + chunk_size - 1); + sg_count /= chunk_size; + + ofap = ctrl_info->pqi_ofa_mem_virt_addr; + + if (sg_count*chunk_size < total_size) + goto out; + + ctrl_info->pqi_ofa_chunk_virt_addr = + kcalloc(sg_count, sizeof(void *), GFP_KERNEL); + if (!ctrl_info->pqi_ofa_chunk_virt_addr) + goto out; + + for (size = 0, i = 0; size < total_size; size += chunk_size, i++) { + dma_addr_t dma_handle; + + ctrl_info->pqi_ofa_chunk_virt_addr[i] = + dma_zalloc_coherent(dev, chunk_size, &dma_handle, + GFP_KERNEL); + + if (!ctrl_info->pqi_ofa_chunk_virt_addr[i]) + break; + + mem_descriptor = &ofap->sg_descriptor[i]; + put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address); + put_unaligned_le32 (chunk_size, &mem_descriptor->length); + } + + if (!size || size < 
total_size) + goto out_free_chunks; + + put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags); + put_unaligned_le16(sg_count, &ofap->num_memory_descriptors); + put_unaligned_le32(size, &ofap->bytes_allocated); + + return 0; + +out_free_chunks: + while (--i >= 0) { + mem_descriptor = &ofap->sg_descriptor[i]; + dma_free_coherent(dev, chunk_size, + ctrl_info->pqi_ofa_chunk_virt_addr[i], + get_unaligned_le64(&mem_descriptor->address)); + } + kfree(ctrl_info->pqi_ofa_chunk_virt_addr); + +out: + put_unaligned_le32 (0, &ofap->bytes_allocated); + return -ENOMEM; +} + +static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info) +{ + u32 total_size; + u32 min_chunk_size; + u32 chunk_sz; + + total_size = le32_to_cpu( + ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated); + min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS; + + for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2) + if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz)) + return 0; + + return -ENOMEM; +} + +static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, + u32 bytes_requested) +{ + struct pqi_ofa_memory *pqi_ofa_memory; + struct device *dev; + + dev = &ctrl_info->pci_dev->dev; + pqi_ofa_memory = dma_zalloc_coherent(dev, + PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, + &ctrl_info->pqi_ofa_mem_dma_handle, + GFP_KERNEL); + + if (!pqi_ofa_memory) + return; + + put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version); + memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE, + sizeof(pqi_ofa_memory->signature)); + pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested); + + ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory; + + if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) { + dev_err(dev, "Failed to allocate host buffer of size = %u", + bytes_requested); + } +} + +static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) +{ + int i; + struct pqi_sg_descriptor *mem_descriptor; + struct pqi_ofa_memory *ofap; + + ofap = ctrl_info->pqi_ofa_mem_virt_addr; + + if (!ofap) + return; + + if (!ofap->bytes_allocated) + goto out; + + mem_descriptor = ofap->sg_descriptor; + + for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors); + i++) { + dma_free_coherent(&ctrl_info->pci_dev->dev, + get_unaligned_le32(&mem_descriptor[i].length), + ctrl_info->pqi_ofa_chunk_virt_addr[i], + get_unaligned_le64(&mem_descriptor[i].address)); + } + kfree(ctrl_info->pqi_ofa_chunk_virt_addr); + +out: + dma_free_coherent(&ctrl_info->pci_dev->dev, + PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap, + ctrl_info->pqi_ofa_mem_dma_handle); + ctrl_info->pqi_ofa_mem_virt_addr = NULL; +} + +static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) +{ + struct pqi_vendor_general_request request; + size_t size; + struct pqi_ofa_memory *ofap; + + memset(&request, 0, sizeof(request)); + + ofap = ctrl_info->pqi_ofa_mem_virt_addr; + + request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL; + put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH, + &request.header.iu_length); + put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE, + &request.function_code); + + if (ofap) { + size = offsetof(struct pqi_ofa_memory, sg_descriptor) + + get_unaligned_le16(&ofap->num_memory_descriptors) * + sizeof(struct pqi_sg_descriptor); + + put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle, + &request.data.ofa_memory_allocation.buffer_address); + put_unaligned_le32(size, + &request.data.ofa_memory_allocation.buffer_length); + + } + + return pqi_submit_raid_request_synchronous(ctrl_info, 
&request.header, + 0, NULL, NO_TIMEOUT); +} + +#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 + +static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info) +{ + msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); + return pqi_ctrl_init_resume(ctrl_info); +} + static void pqi_perform_lockup_action(void) { switch (pqi_lockup_action) { @@ -6600,7 +7736,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { int rc; - int node; + int node, cp_node; struct pqi_ctrl_info *ctrl_info; pqi_print_ctrl_info(pci_dev, id); @@ -6618,8 +7754,12 @@ static int pqi_pci_probe(struct pci_dev *pci_dev, "controller device ID matched using wildcards\n"); node = dev_to_node(&pci_dev->dev); - if (node == NUMA_NO_NODE) - set_dev_node(&pci_dev->dev, 0); + if (node == NUMA_NO_NODE) { + cp_node = cpu_to_node(0); + if (cp_node == NUMA_NO_NODE) + cp_node = 0; + set_dev_node(&pci_dev->dev, cp_node); + } ctrl_info = pqi_alloc_ctrl_info(node); if (!ctrl_info) { @@ -6654,6 +7794,8 @@ static void pqi_pci_remove(struct pci_dev *pci_dev) if (!ctrl_info) return; + ctrl_info->in_shutdown = true; + pqi_remove_ctrl(ctrl_info); } @@ -6671,6 +7813,7 @@ static void pqi_shutdown(struct pci_dev *pci_dev) * storage. */ rc = pqi_flush_cache(ctrl_info, SHUTDOWN); + pqi_free_interrupts(ctrl_info); pqi_reset(ctrl_info); if (rc == 0) return; @@ -6715,11 +7858,12 @@ static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t stat pqi_cancel_rescan_worker(ctrl_info); pqi_wait_until_scan_finished(ctrl_info); pqi_wait_until_lun_reset_finished(ctrl_info); + pqi_wait_until_ofa_finished(ctrl_info); pqi_flush_cache(ctrl_info, SUSPEND); pqi_ctrl_block_requests(ctrl_info); pqi_ctrl_wait_until_quiesced(ctrl_info); pqi_wait_until_inbound_queues_empty(ctrl_info); - pqi_ctrl_wait_for_pending_io(ctrl_info); + pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); pqi_stop_heartbeat_timer(ctrl_info); if (state.event == PM_EVENT_FREEZE) @@ -6805,6 +7949,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0xc460) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x193d, 0xc461) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x193d, 0xf460) }, { @@ -6841,6 +7993,30 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd227) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd228) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd229) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd22a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd22b) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x19e5, 0xd22c) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) }, { diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index b209a35e482e..0e4ef215115f 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -17,9 +17,11 @@ */ #include <linux/kernel.h> +#include <linux/bsg-lib.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_transport_sas.h> +#include <asm/unaligned.h> #include "smartpqi.h" static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port) @@ -97,14 +99,32 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port, identify = &rphy->identify; 
identify->sas_address = pqi_sas_port->sas_address; - identify->initiator_port_protocols = SAS_PROTOCOL_STP; - identify->target_port_protocols = SAS_PROTOCOL_STP; + + if (pqi_sas_port->device && + pqi_sas_port->device->is_expander_smp_device) { + identify->initiator_port_protocols = SAS_PROTOCOL_SMP; + identify->target_port_protocols = SAS_PROTOCOL_SMP; + } else { + identify->initiator_port_protocols = SAS_PROTOCOL_STP; + identify->target_port_protocols = SAS_PROTOCOL_STP; + } return sas_rphy_add(rphy); } +static struct sas_rphy *pqi_sas_rphy_alloc(struct pqi_sas_port *pqi_sas_port) +{ + if (pqi_sas_port->device && + pqi_sas_port->device->is_expander_smp_device) + return sas_expander_alloc(pqi_sas_port->port, + SAS_FANOUT_EXPANDER_DEVICE); + + return sas_end_device_alloc(pqi_sas_port->port); +} + static struct pqi_sas_port *pqi_alloc_sas_port( - struct pqi_sas_node *pqi_sas_node, u64 sas_address) + struct pqi_sas_node *pqi_sas_node, u64 sas_address, + struct pqi_scsi_dev *device) { int rc; struct pqi_sas_port *pqi_sas_port; @@ -127,6 +147,7 @@ static struct pqi_sas_port *pqi_alloc_sas_port( pqi_sas_port->port = port; pqi_sas_port->sas_address = sas_address; + pqi_sas_port->device = device; list_add_tail(&pqi_sas_port->port_list_entry, &pqi_sas_node->port_list_head); @@ -146,7 +167,7 @@ static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port) struct pqi_sas_phy *next; list_for_each_entry_safe(pqi_sas_phy, next, - &pqi_sas_port->phy_list_head, phy_list_entry) + &pqi_sas_port->phy_list_head, phy_list_entry) pqi_free_sas_phy(pqi_sas_phy); sas_port_delete(pqi_sas_port->port); @@ -176,7 +197,7 @@ static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node) return; list_for_each_entry_safe(pqi_sas_port, next, - &pqi_sas_node->port_list_head, port_list_entry) + &pqi_sas_node->port_list_head, port_list_entry) pqi_free_sas_port(pqi_sas_port); kfree(pqi_sas_node); @@ -206,13 +227,14 @@ int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info) struct pqi_sas_port *pqi_sas_port; struct pqi_sas_phy *pqi_sas_phy; - parent_dev = &shost->shost_gendev; + parent_dev = &shost->shost_dev; pqi_sas_node = pqi_alloc_sas_node(parent_dev); if (!pqi_sas_node) return -ENOMEM; - pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, ctrl_info->sas_address); + pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, + ctrl_info->sas_address, NULL); if (!pqi_sas_port) { rc = -ENODEV; goto free_sas_node; @@ -254,11 +276,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node, struct pqi_sas_port *pqi_sas_port; struct sas_rphy *rphy; - pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, device->sas_address); + pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, + device->sas_address, device); if (!pqi_sas_port) return -ENOMEM; - rphy = sas_end_device_alloc(pqi_sas_port->port); + rphy = pqi_sas_rphy_alloc(pqi_sas_port); if (!rphy) { rc = -ENODEV; goto free_sas_port; @@ -329,6 +352,128 @@ static int pqi_sas_phy_speed(struct sas_phy *phy, return -EINVAL; } +#define CSMI_IOCTL_TIMEOUT 60 +#define SMP_CRC_FIELD_LENGTH 4 + +static struct bmic_csmi_smp_passthru_buffer * +pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy, + struct bsg_job *job) +{ + struct bmic_csmi_smp_passthru_buffer *smp_buf; + struct bmic_csmi_ioctl_header *ioctl_header; + struct bmic_csmi_smp_passthru *parameters; + u32 req_size; + u32 resp_size; + + smp_buf = kzalloc(sizeof(*smp_buf), GFP_KERNEL); + if (!smp_buf) + return NULL; + + req_size = job->request_payload.payload_len; + resp_size = job->reply_payload.payload_len; + + 
ioctl_header = &smp_buf->ioctl_header; + put_unaligned_le32(sizeof(smp_buf->ioctl_header), + &ioctl_header->header_length); + put_unaligned_le32(CSMI_IOCTL_TIMEOUT, &ioctl_header->timeout); + put_unaligned_le32(CSMI_CC_SAS_SMP_PASSTHRU, + &ioctl_header->control_code); + put_unaligned_le32(sizeof(smp_buf->parameters), &ioctl_header->length); + + parameters = &smp_buf->parameters; + parameters->phy_identifier = rphy->identify.phy_identifier; + parameters->port_identifier = 0; + parameters->connection_rate = 0; + put_unaligned_be64(rphy->identify.sas_address, + &parameters->destination_sas_address); + + if (req_size > SMP_CRC_FIELD_LENGTH) + req_size -= SMP_CRC_FIELD_LENGTH; + + put_unaligned_le32(req_size, &parameters->request_length); + + put_unaligned_le32(resp_size, &parameters->response_length); + + sg_copy_to_buffer(job->request_payload.sg_list, + job->reply_payload.sg_cnt, &parameters->request, + req_size); + + return smp_buf; +} + +static unsigned int pqi_build_sas_smp_handler_reply( + struct bmic_csmi_smp_passthru_buffer *smp_buf, struct bsg_job *job, + struct pqi_raid_error_info *error_info) +{ + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, &smp_buf->parameters.response, + le32_to_cpu(smp_buf->parameters.response_length)); + + job->reply_len = le16_to_cpu(error_info->sense_data_length); + memcpy(job->reply, error_info->data, + le16_to_cpu(error_info->sense_data_length)); + + return job->reply_payload.payload_len - + get_unaligned_le32(&error_info->data_in_transferred); +} + +void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, + struct sas_rphy *rphy) +{ + int rc; + struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + struct bmic_csmi_smp_passthru_buffer *smp_buf; + struct pqi_raid_error_info error_info; + unsigned int reslen = 0; + + pqi_ctrl_busy(ctrl_info); + + if (job->reply_payload.payload_len == 0) { + rc = -ENOMEM; + goto out; + } + + if (!rphy) { + rc = -EINVAL; + goto out; + } + + if (rphy->identify.device_type != SAS_FANOUT_EXPANDER_DEVICE) { + rc = -EINVAL; + goto out; + } + + if (job->request_payload.sg_cnt > 1 || job->reply_payload.sg_cnt > 1) { + rc = -EINVAL; + goto out; + } + + if (pqi_ctrl_offline(ctrl_info)) { + rc = -ENXIO; + goto out; + } + + if (pqi_ctrl_blocked(ctrl_info)) { + rc = -EBUSY; + goto out; + } + + smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job); + if (!smp_buf) { + rc = -ENOMEM; + goto out; + } + + rc = pqi_csmi_smp_passthru(ctrl_info, smp_buf, sizeof(*smp_buf), + &error_info); + if (rc) + goto out; + + reslen = pqi_build_sas_smp_handler_reply(smp_buf, job, &error_info); +out: + bsg_job_done(job, rc, reslen); + pqi_ctrl_unbusy(ctrl_info); +} struct sas_function_template pqi_sas_transport_functions = { .get_linkerrors = pqi_sas_get_linkerrors, .get_enclosure_identifier = pqi_sas_get_enclosure_identifier, @@ -338,4 +483,5 @@ struct sas_function_template pqi_sas_transport_functions = { .phy_setup = pqi_sas_phy_setup, .phy_release = pqi_sas_phy_release, .set_phy_speed = pqi_sas_phy_speed, + .smp_handler = pqi_sas_smp_handler, }; diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index ea91658c7060..dcd11c6418cc 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -34,6 +34,7 @@ #define SIS_REENABLE_SIS_MODE 0x1 #define SIS_ENABLE_MSIX 0x40 #define SIS_ENABLE_INTX 0x80 +#define SIS_SOFT_RESET 0x100 #define SIS_CMD_READY 0x200 #define SIS_TRIGGER_SHUTDOWN 0x800000 #define SIS_PQI_RESET_QUIESCE 0x1000000 @@ -59,7 +60,7 @@ #define 
SIS_CTRL_KERNEL_UP 0x80 #define SIS_CTRL_KERNEL_PANIC 0x100 -#define SIS_CTRL_READY_TIMEOUT_SECS 30 +#define SIS_CTRL_READY_TIMEOUT_SECS 180 #define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90 #define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10 @@ -90,7 +91,7 @@ static int sis_wait_for_ctrl_ready_with_timeout(struct pqi_ctrl_info *ctrl_info, unsigned long timeout; u32 status; - timeout = (timeout_secs * HZ) + jiffies; + timeout = (timeout_secs * PQI_HZ) + jiffies; while (1) { status = readl(&ctrl_info->registers->sis_firmware_status); @@ -202,7 +203,7 @@ static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info, * the top of the loop in order to give the controller time to start * processing the command before we start polling. */ - timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies; + timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * PQI_HZ) + jiffies; while (1) { msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS); doorbell = readl(&registers->sis_ctrl_to_host_doorbell); @@ -348,7 +349,7 @@ static int sis_wait_for_doorbell_bit_to_clear( u32 doorbell_register; unsigned long timeout; - timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * HZ) + jiffies; + timeout = (SIS_DOORBELL_BIT_CLEAR_TIMEOUT_SECS * PQI_HZ) + jiffies; while (1) { doorbell_register = @@ -420,6 +421,12 @@ u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info) return readl(&ctrl_info->registers->sis_driver_scratch); } +void sis_soft_reset(struct pqi_ctrl_info *ctrl_info) +{ + writel(SIS_SOFT_RESET, + &ctrl_info->registers->sis_host_to_ctrl_doorbell); +} + static void __attribute__((unused)) verify_structures(void) { BUILD_BUG_ON(offsetof(struct sis_base_struct, diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 2bf889dbf5ab..d018cb9c3f82 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -33,5 +33,6 @@ int sis_pqi_reset_quiesce(struct pqi_ctrl_info *ctrl_info); int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info); void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value); u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info); +void sis_soft_reset(struct pqi_ctrl_info *ctrl_info); #endif /* _SMARTPQI_SIS_H */