From 7b6859fbdcc4a590c8ef03bcc00d770b42d41c42 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Thu, 18 May 2017 19:41:04 +0300 Subject: qed: Utilize FW 8.20.0.0 This pushes qed [and as result, all qed* drivers] into using 8.20.0.0 firmware. The changes are mostly contained in qed with minor changes to qedi due to some HSI changes. Content-wise, the firmware contains fixes to various issues exposed since the release of the previous firmware, including: - Corrects iSCSI fast retransmit when data digest is enabled. - Stop draining packets when receiving several consecutive PFCs. - Prevent possible assertion when consecutively opening/closing many connections. - Prevent possible assertion due to too long BDQ fetch time. In addition, the new firmware would allow us to later add iWARP support in qed and qedr. Changes from previous version ----------------------------- - V2: Fix warning in qed_debug.c Signed-off-by: Chad Dupuis Signed-off-by: Ram Amrani Signed-off-by: Tomer Tayar Signed-off-by: Manish Rangankar Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- include/linux/qed/common_hsi.h | 209 ++++++++++++++++++++++++++------------- include/linux/qed/eth_common.h | 3 +- include/linux/qed/fcoe_common.h | 1 - include/linux/qed/iscsi_common.h | 91 ++++++++--------- include/linux/qed/rdma_common.h | 2 +- include/linux/qed/roce_common.h | 2 + include/linux/qed/tcp_common.h | 5 +- 7 files changed, 188 insertions(+), 125 deletions(-) (limited to 'include/linux/qed') diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index fbab6e0514f0..a567cbf8c5b4 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -96,12 +96,12 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 -#define MAX_NUM_LL2_RX_QUEUES 32 -#define MAX_NUM_LL2_TX_STATS_COUNTERS 32 +#define MAX_NUM_LL2_RX_QUEUES 48 +#define MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 15 -#define FW_REVISION_VERSION 3 +#define FW_MINOR_VERSION 20 +#define FW_REVISION_VERSION 0 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -181,6 +181,14 @@ #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) + +#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) +#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) +#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2) +#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3) +#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4) +#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5) + /*****************/ /* DQ CONSTANTS */ /*****************/ @@ -457,7 +465,6 @@ #define PXP_BAR_DQ 1 /* PTT and GTT */ -#define PXP_NUM_PF_WINDOWS 12 #define PXP_PER_PF_ENTRY_SIZE 8 #define PXP_NUM_GLOBAL_WINDOWS 243 #define PXP_GLOBAL_ENTRY_SIZE 4 @@ -482,6 +489,7 @@ #define PXP_PF_ME_OPAQUE_ADDR 0x1f8 #define PXP_PF_ME_CONCRETE_ADDR 0x1fc +#define PXP_NUM_PF_WINDOWS 12 #define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 #define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS #define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 @@ -618,16 +626,21 @@ /*****************/ /* PRM CONSTANTS */ /*****************/ -#define PRM_DMA_PAD_BYTES_NUM 2 -/******************/ -/* SDMs CONSTANTS */ -/******************/ -#define SDM_OP_GEN_TRIG_NONE 0 -#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 -#define SDM_OP_GEN_TRIG_AGG_INT 2 -#define SDM_OP_GEN_TRIG_LOADER 4 -#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 -#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7 +#define PRM_DMA_PAD_BYTES_NUM 2 +/*****************/ +/* SDMs 
CONSTANTS */ +/*****************/ + +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 +#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 + +/********************/ +/* Completion types */ +/********************/ #define SDM_COMP_TYPE_NONE 0 #define SDM_COMP_TYPE_WAKE_THREAD 1 @@ -638,10 +651,11 @@ #define SDM_COMP_TYPE_INDICATE_ERROR 6 #define SDM_COMP_TYPE_RELEASE_THREAD 7 #define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 -/******************/ -/* PBF CONSTANTS */ -/******************/ +/*****************/ +/* PBF Constants */ +/*****************/ /* Number of PBF command queue lines. Each line is 32B. */ #define PBF_MAX_CMD_LINES 3328 @@ -861,7 +875,7 @@ enum db_dest { /* Enum of doorbell DPM types */ enum db_dpm_type { DPM_LEGACY, - DPM_ROCE, + DPM_RDMA, DPM_L2_INLINE, DPM_L2_BD, MAX_DB_DPM_TYPE @@ -884,8 +898,8 @@ struct db_l2_dpm_data { #define DB_L2_DPM_DATA_RESERVED0_SHIFT 27 #define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7 #define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28 -#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1 -#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31 +#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1 +#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31 }; /* Structure for SGE in a DPM doorbell of type DPM_L2_BD */ @@ -931,31 +945,33 @@ struct db_pwm_addr { }; /* Parameters to RoCE firmware, passed in EDPM doorbell */ -struct db_roce_dpm_params { +struct db_rdma_dpm_params { __le32 params; -#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F -#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0 -#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3 -#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6 -#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF -#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8 -#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF -#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16 -#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 -#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 -#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3 -#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; /* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ -struct db_roce_dpm_data { +struct db_rdma_dpm_data { __le16 icid; __le16 prod_val; - struct db_roce_dpm_params params; + struct db_rdma_dpm_params params; }; /* Igu interrupt command */ @@ -1026,6 +1042,42 @@ struct parsing_and_err_flags { #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; +struct parsing_err_flags { + __le16 flags; +#define 
PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1 +#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1 +#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1 +#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1 +#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1 +#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 +}; + struct pb_context { __le32 crc[4]; }; @@ -1288,39 +1340,56 @@ struct tdif_task_context { struct timers_context { __le32 logical_client_0; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED0_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED0_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED0_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED0_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 __le32 logical_client_1; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED1_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED1_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED2_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED2_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29 
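+
+/* Illustrative aside, not part of this patch: these MASK/SHIFT pairs
+ * are consumed via qed's GET_FIELD()/SET_FIELD() helpers (the
+ * (((value) >> name##_SHIFT) & name##_MASK) pattern that appears in
+ * the qed_if.h excerpts later in this series). E.g., with a
+ * hypothetical struct timers_context *ctx:
+ *
+ *	u32 lc1 = le32_to_cpu(ctx->logical_client_1);
+ *	bool lc1_active = GET_FIELD(lc1, TIMERS_CONTEXT_ACTIVELC1);
+ */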
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED3_SHIFT 30 __le32 logical_client_2; -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 -#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 -#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 -#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 -#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 -#define TIMERS_CONTEXT_RESERVED2_MASK 0x3 -#define TIMERS_CONTEXT_RESERVED2_SHIFT 30 +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED4_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED4_SHIFT 27 +#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1 +#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28 +#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1 +#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29 +#define TIMERS_CONTEXT_RESERVED5_MASK 0x3 +#define TIMERS_CONTEXT_RESERVED5_SHIFT 30 __le32 host_expiration_fields; -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 -#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 -#define TIMERS_CONTEXT_RESERVED3_MASK 0x7 -#define TIMERS_CONTEXT_RESERVED3_SHIFT 29 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0 +#define TIMERS_CONTEXT_RESERVED6_MASK 0x1 +#define TIMERS_CONTEXT_RESERVED6_SHIFT 27 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1 +#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28 +#define TIMERS_CONTEXT_RESERVED7_MASK 0x7 +#define TIMERS_CONTEXT_RESERVED7_SHIFT 29 }; + +enum tunnel_next_protocol { + e_unknown = 0, + e_l2 = 1, + e_ipv4 = 2, + e_ipv6 = 3, + MAX_TUNNEL_NEXT_PROTOCOL +}; + #endif /* __COMMON_HSI__ */ #endif diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 34d93eb5bfba..cb06e6e368e1 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -75,7 +75,8 @@ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ -#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_BD_THRESHOLD 12 /* num of MAC/VLAN filters */ #define ETH_NUM_MAC_FILTERS 512 diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 947a635d04bb..12fc9e788eea 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -13,7 +13,6 @@ /*********************/ #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 -#define FCOE_MAX_SIZE_FCP_DATA_SUPER (8600) struct fcoe_abts_pkt { __le32 abts_rsp_fc_payload_lo; diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 69949f8e354b..85e086cba639 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -75,25 +75,13 @@ #define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ -#define ISCSI_OPCODE_NOP_OUT_NO_IMM (0) -#define ISCSI_OPCODE_NOP_OUT ( \ - ISCSI_OPCODE_NOP_OUT_NO_IMM | 0x40) -#define ISCSI_OPCODE_SCSI_CMD_NO_IMM (1) -#define ISCSI_OPCODE_SCSI_CMD ( \ - ISCSI_OPCODE_SCSI_CMD_NO_IMM | 0x40) -#define ISCSI_OPCODE_TMF_REQUEST_NO_IMM (2) -#define ISCSI_OPCODE_TMF_REQUEST ( \ - ISCSI_OPCODE_TMF_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM (3) -#define ISCSI_OPCODE_LOGIN_REQUEST ( \ - ISCSI_OPCODE_LOGIN_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_TEXT_REQUEST_NO_IMM (4) -#define ISCSI_OPCODE_TEXT_REQUEST ( \ - 
ISCSI_OPCODE_TEXT_REQUEST_NO_IMM | 0x40) -#define ISCSI_OPCODE_DATA_OUT (5) -#define ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM (6) -#define ISCSI_OPCODE_LOGOUT_REQUEST ( \ - ISCSI_OPCODE_LOGOUT_REQUEST_NO_IMM | 0x40) +#define ISCSI_OPCODE_NOP_OUT (0) +#define ISCSI_OPCODE_SCSI_CMD (1) +#define ISCSI_OPCODE_TMF_REQUEST (2) +#define ISCSI_OPCODE_LOGIN_REQUEST (3) +#define ISCSI_OPCODE_TEXT_REQUEST (4) +#define ISCSI_OPCODE_DATA_OUT (5) +#define ISCSI_OPCODE_LOGOUT_REQUEST (6) /* iSCSI response/messages op codes */ #define ISCSI_OPCODE_NOP_IN (0x20) @@ -172,17 +160,23 @@ struct iscsi_async_msg_hdr { struct iscsi_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_CMD_HDR_READ_MASK 0x1 -#define ISCSI_CMD_HDR_READ_SHIFT 6 -#define ISCSI_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_CMD_HDR_FINAL_SHIFT 7 - u8 opcode; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 hdr_first_byte; +#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F +#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 +#define ISCSI_CMD_HDR_IMM_MASK 0x1 +#define ISCSI_CMD_HDR_IMM_SHIFT 6 +#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 +#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 __le32 hdr_second_dword; #define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF #define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 @@ -790,9 +784,9 @@ enum iscsi_error_types { ISCSI_CONN_ERROR_LOCAL_COMPLETION_ERROR, ISCSI_CONN_ERROR_DATA_OVERRUN, ISCSI_CONN_ERROR_OUT_OF_SGES_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_URG_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_IP_OPTIONS_ERROR, - ISCSI_CONN_ERROR_TCP_SEG_PROC_CONNECT_INVALID_WS_OPTION, + ISCSI_CONN_ERROR_IP_OPTIONS_ERROR, + ISCSI_CONN_ERROR_PRS_ERRORS, + ISCSI_CONN_ERROR_CONNECT_INVALID_TCP_OPTION, ISCSI_CONN_ERROR_TCP_IP_FRAGMENT_ERROR, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_LEN, ISCSI_CONN_ERROR_PROTOCOL_ERR_AHS_TYPE, @@ -1304,22 +1298,6 @@ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_total_pdu_cnt; }; -struct iscsi_db_data { - u8 params; -#define ISCSI_DB_DATA_DEST_MASK 0x3 -#define ISCSI_DB_DATA_DEST_SHIFT 0 -#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 -#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 -#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 -#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 -#define ISCSI_DB_DATA_RESERVED_MASK 0x1 -#define ISCSI_DB_DATA_RESERVED_SHIFT 5 -#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 sq_prod; -}; - struct tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; @@ -1398,5 +1376,20 @@ struct tstorm_iscsi_task_ag_ctx { __le32 reg1; __le32 reg2; }; +struct iscsi_db_data { + u8 params; +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 sq_prod; +}; #endif /* __ISCSI_COMMON__ */ diff --git 
a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 72c770f9f666..a9b3050f469c 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -42,7 +42,7 @@ #define RDMA_MAX_SGE_PER_SQ_WQE (4) #define RDMA_MAX_SGE_PER_RQ_WQE (4) -#define RDMA_MAX_DATA_SIZE_IN_WQE (0x7FFFFFFF) +#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) #define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) #define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 866f063026de..fe6a33e45977 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -37,6 +37,8 @@ #define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) #define ROCE_MAX_QPS (32 * 1024) +#define ROCE_DCQCN_NP_MAX_QPS (64) +#define ROCE_DCQCN_RP_MAX_QPS (64) enum roce_async_events_type { ROCE_ASYNC_EVENT_NONE = 0, diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index a5e843268f0e..dbf7a43c3e1f 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -111,7 +111,6 @@ struct tcp_offload_params { __le32 snd_wnd; __le32 rcv_wnd; __le32 snd_wl1; - __le32 ts_time; __le32 ts_recent; __le32 ts_recent_age; __le32 total_rt; @@ -122,7 +121,7 @@ struct tcp_offload_params { u8 ka_probe_cnt; u8 rt_cnt; __le16 rtt_var; - __le16 reserved2; + __le16 fw_internal; __le32 ka_timeout; __le32 ka_interval; __le32 max_rt_time; @@ -130,7 +129,7 @@ struct tcp_offload_params { u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; - __le32 ts_ticks_per_second; + __le32 reserved3[2]; }; struct tcp_offload_params_opt2 { -- cgit v1.2.3 From 9d7650c25498e4f51213fe48eddde5778434f375 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Tue, 23 May 2017 09:41:19 +0300 Subject: qed: Align DP_ERR style with other DP macros Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- include/linux/qed/qed_if.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'include/linux/qed') diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index c70ac13a97e6..ff590cb37a00 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -700,11 +700,13 @@ struct qed_common_ops { (((value) >> (name ## _SHIFT)) & name ## _MASK) /* Debug print definitions */ -#define DP_ERR(cdev, fmt, ...) \ - pr_err("[%s:%d(%s)]" fmt, \ - __func__, __LINE__, \ - DP_NAME(cdev) ? DP_NAME(cdev) : "", \ - ## __VA_ARGS__) \ +#define DP_ERR(cdev, fmt, ...) \ + do { \ + pr_err("[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + DP_NAME(cdev) ? DP_NAME(cdev) : "", \ + ## __VA_ARGS__); \ + } while (0) #define DP_NOTICE(cdev, fmt, ...) \ do { \ -- cgit v1.2.3 From ae33666ab89675968d77753d18452b1ef654c43a Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Tue, 23 May 2017 09:41:26 +0300 Subject: qed: Provide MBI information in dev_info Pass additional information about package installed on persistent memory so that protocol drivers would be able to log it. Signed-off-by: Tomer Tayar Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 6 ++++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 3 +++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 30 ++++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 12 ++++++++++++ include/linux/qed/qed_if.h | 17 +++++++++++++++++ 5 files changed, 68 insertions(+) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 4755d0b33b90..802c162d8474 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11782,6 +11782,12 @@ struct nvm_cfg1_glob { u32 led_global_settings; u32 generic_cont1; u32 mbi_version; +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 u32 mbi_date; u32 misc_sig; u32 device_capabilities; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 3043dcce125c..b5313c561fa2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -281,6 +281,9 @@ int qed_fill_dev_info(struct qed_dev *cdev, qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, &dev_info->mfw_rev, NULL); + qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, + &dev_info->mbi_version); + qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, &dev_info->flash_size); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index b32e8190f3fb..fc49c75e6c4b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1523,6 +1523,36 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_mbi_ver) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + /* Read the address of the nvm_cfg */ + nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + if (!nvm_cfg_addr) { + DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); + return -EINVAL; + } + + /* Read the offset of nvm_cfg1 */ + nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, mbi_version); + *p_mbi_ver = qed_rd(p_hwfn, p_ptt, + mbi_ver_addr) & + (NVM_CFG1_GLOB_MBI_VERSION_0_MASK | + NVM_CFG1_GLOB_MBI_VERSION_1_MASK | + NVM_CFG1_GLOB_MBI_VERSION_2_MASK); + + return 0; +} + int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) { struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 3e5bffe3d4e2..40247593e772 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -255,6 +255,18 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id); +/** + * @brief Get the MBI version value + * + * @param p_hwfn + * @param p_ptt + * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * + * @return int - 0 - operation was successful. 
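+ *
+ * A minimal caller sketch (illustrative only, not part of this patch;
+ * it assumes the QED_MBI_VERSION_* masks/offsets added to qed_if.h
+ * later in this series):
+ *
+ *	u32 mbi;
+ *
+ *	if (!qed_mcp_get_mbi_ver(p_hwfn, p_ptt, &mbi))
+ *		DP_INFO(p_hwfn, "MBI %d.%d.%d\n",
+ *			(mbi & QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET,
+ *			(mbi & QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET,
+ *			(mbi & QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET);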
+ */ +int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_mbi_ver); + /** * @brief Get media type value of the port. * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index ff590cb37a00..b00e6753b4f4 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -328,6 +328,14 @@ struct qed_dev_info { /* MFW version */ u32 mfw_rev; +#define QED_MFW_VERSION_0_MASK 0x000000FF +#define QED_MFW_VERSION_0_OFFSET 0 +#define QED_MFW_VERSION_1_MASK 0x0000FF00 +#define QED_MFW_VERSION_1_OFFSET 8 +#define QED_MFW_VERSION_2_MASK 0x00FF0000 +#define QED_MFW_VERSION_2_OFFSET 16 +#define QED_MFW_VERSION_3_MASK 0xFF000000 +#define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; u8 mf_mode; @@ -337,6 +345,15 @@ struct qed_dev_info { bool wol_support; + /* MBI version */ + u32 mbi_version; +#define QED_MBI_VERSION_0_MASK 0x000000FF +#define QED_MBI_VERSION_0_OFFSET 0 +#define QED_MBI_VERSION_1_MASK 0x0000FF00 +#define QED_MBI_VERSION_1_OFFSET 8 +#define QED_MBI_VERSION_2_MASK 0x00FF0000 +#define QED_MBI_VERSION_2_OFFSET 16 + enum qed_dev_type dev_type; /* Output parameters for qede */ -- cgit v1.2.3 From 712c3cbf193fcadf0ba67da61432beb1a71e400b Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Tue, 23 May 2017 09:41:28 +0300 Subject: qed: Replace set_id() api with set_name() Current API between qed and protocol modules allows passing an additional private string - but it doesn't get utilized by qed anywhere. Clarify the API by removing it and renaming it 'set_name'. CC: Manish Rangankar Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 1 - drivers/net/ethernet/qlogic/qed/qed_main.c | 9 +++------ drivers/net/ethernet/qlogic/qede/qede_main.c | 4 ++-- drivers/scsi/qedf/qedf_main.c | 2 +- drivers/scsi/qedi/qedi_main.c | 2 +- include/linux/qed/qed_if.h | 4 +--- 6 files changed, 8 insertions(+), 14 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 2eb6031f0df1..e0becec17b09 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -638,7 +638,6 @@ struct qed_dev { int pcie_width; int pcie_speed; - u8 ver_str[VER_SIZE]; /* Add MF related configuration */ u8 mcp_rev; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index b5313c561fa2..c5bb80b9afc1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -338,6 +338,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, if (!cdev) goto err0; + cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; cdev->protocol = params->protocol; if (params->is_vf) @@ -1128,17 +1129,13 @@ static int qed_slowpath_stop(struct qed_dev *cdev) return 0; } -static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE], - char ver_str[VER_SIZE]) +static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) { int i; memcpy(cdev->name, name, NAME_SIZE); for_each_hwfn(cdev, i) snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); - - memcpy(cdev->ver_str, ver_str, VER_SIZE); - cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; } static u32 qed_sb_init(struct qed_dev *cdev, @@ -1692,7 +1689,7 @@ const struct qed_common_ops qed_common_ops_pass = { .probe = &qed_probe, .remove = &qed_remove, .set_power_state = &qed_set_power_state, - .set_id = &qed_set_id, + .set_name = &qed_set_name, .update_pf_params = &qed_update_pf_params, .slowpath_start = 
&qed_slowpath_start, .slowpath_stop = &qed_slowpath_stop, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index d496ba70ddb8..00c70625f8a4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -259,7 +259,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event, /* Notify qed of the name change */ if (!edev->ops || !edev->ops->common) goto done; - edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede"); + edev->ops->common->set_name(edev->cdev, edev->ndev->name); break; case NETDEV_CHANGEADDR: edev = netdev_priv(ndev); @@ -967,7 +967,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, goto err4; } - edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); + edev->ops->common->set_name(cdev, edev->ndev->name); /* PTP not supported on VFs */ if (!is_vf) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index a5c97342fd5d..b97405ed6cae 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2954,7 +2954,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn); sprintf(host_buf, "host_%d", host->host_no); - qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION); + qed_ops->common->set_name(qedf->cdev, host_buf); /* Set xid max values */ diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 92775a8b74b1..073b3051bb8f 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -1843,7 +1843,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) qedi->mac); sprintf(host_buf, "host_%d", qedi->shost->host_no); - qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION); + qedi_ops->common->set_name(qedi->cdev, host_buf); qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b00e6753b4f4..73c46d6d5727 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -520,9 +520,7 @@ struct qed_common_ops { int (*set_power_state)(struct qed_dev *cdev, pci_power_t state); - void (*set_id)(struct qed_dev *cdev, - char name[], - char ver_str[]); + void (*set_name) (struct qed_dev *cdev, char name[]); /* Client drivers need to make this call before slowpath_start. * PF params required for the call before slowpath_start is -- cgit v1.2.3 From 726fdbe9fa7ebccda1579716f68f8bae6fa9c87a Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Thu, 1 Jun 2017 15:29:06 +0300 Subject: qed: Encapsulate interrupt counters in struct We already have an API struct that contains interrupt-related numbers. Use it to encapsulate all information relating to the status of SBs as (used|free). Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 4 ++-- drivers/net/ethernet/qlogic/qed/qed_int.c | 20 +++++++++----------- drivers/net/ethernet/qlogic/qed/qed_int.h | 10 +++++----- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 9 ++++----- include/linux/qed/qed_if.h | 12 +++++++++--- 6 files changed, 30 insertions(+), 27 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3b6114d4461a..1fff0473ddbb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2061,7 +2061,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); feat_num[QED_VF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_L2_QUEUE), - sb_cnt_info.sb_iov_cnt); + sb_cnt_info.iov_cnt); feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) - non_l2_sbs, @@ -2255,7 +2255,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, case QED_SB: memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); - *p_resc_num = sb_cnt_info.sb_cnt; + *p_resc_num = sb_cnt_info.cnt; break; default: return -EINVAL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 3307978f6eeb..c9164e27a1b2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1769,7 +1769,7 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, bool b_set, bool b_slowpath) { u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; - u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; + u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->usage.cnt; u32 igu_sb_id = 0, val = 0; val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); @@ -1827,7 +1827,6 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Initialize base sb / sb cnt for PFs and VFs */ p_igu_info->igu_base_sb = 0xffff; - p_igu_info->igu_sb_cnt = 0; p_igu_info->igu_base_sb_iov = 0xffff; /* Distinguish between existent and non-existent default SB */ @@ -1856,7 +1855,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) { if (p_igu_info->igu_base_sb == 0xffff) p_igu_info->igu_base_sb = igu_sb_id; - p_igu_info->igu_sb_cnt++; + p_igu_info->usage.cnt++; } } else if (!(p_block->is_pf) && (p_block->function_id >= min_vf) && @@ -1867,7 +1866,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) if (p_igu_info->igu_base_sb_iov == 0xffff) p_igu_info->igu_base_sb_iov = igu_sb_id; - p_igu_info->free_blks++; + p_igu_info->usage.iov_cnt++; } /* Mark the First entry belonging to the PF or its VFs @@ -1900,12 +1899,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } /* All non default SB are considered free at this point */ - p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; + p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; + p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x\n", p_igu_info->igu_dsb_id, - p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov); + p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); return 0; } @@ -2003,9 +2003,7 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, if (!info || !p_sb_cnt_info) return; - p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; - p_sb_cnt_info->sb_iov_cnt = 
info->igu_sb_cnt_iov; - p_sb_cnt_info->sb_free_blk = info->free_blks; + memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); } u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) @@ -2014,10 +2012,10 @@ u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) /* Determine origin of SB id */ if ((sb_id >= p_info->igu_base_sb) && - (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { + (sb_id < p_info->igu_base_sb + p_info->usage.cnt)) { return sb_id - p_info->igu_base_sb; } else if ((sb_id >= p_info->igu_base_sb_iov) && - (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { + (sb_id < p_info->igu_base_sb_iov + p_info->usage.iov_cnt)) { /* We want the first VF queue to be adjacent to the * last PF queue. Since L2 queues can be partial to * SBs, we'll use the feature instead. diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 60aaf9f9bb78..5a0e8f02c969 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -216,11 +216,11 @@ struct qed_igu_block { struct qed_igu_info { struct qed_igu_block entry[MAX_TOT_SB_PER_PATH]; u16 igu_dsb_id; - u16 igu_base_sb; - u16 igu_base_sb_iov; - u16 igu_sb_cnt; - u16 igu_sb_cnt_iov; - u16 free_blks; + + u16 igu_base_sb; + u16 igu_base_sb_iov; + struct qed_sb_cnt_info usage; + }; /* TODO Names of function may change... */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c5bb80b9afc1..ac3bdcd9f0b6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -762,7 +762,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, for_each_hwfn(cdev, i) { memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); - cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; + cdev->int_params.in.num_vectors += sb_cnt_info.cnt; cdev->int_params.in.num_vectors++; /* slowpath */ } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 0827a4187dc7..62b207a80a03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -874,9 +874,9 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, igu_blocks = p_hwfn->hw_info.p_igu_info->entry; - if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) - num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; - p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) + num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); @@ -932,8 +932,7 @@ static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, addr, val); p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; - - p_hwfn->hw_info.p_igu_info->free_blks++; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; } vf->num_sbs = 0; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 73c46d6d5727..607e1c5e185a 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -886,9 +886,15 @@ struct qed_eth_stats { #define TX_PI(tc) (RX_PI + 1 + tc) struct qed_sb_cnt_info { - int sb_cnt; - int sb_iov_cnt; - int sb_free_blk; + /* Original, current, and free SBs for PF */ + int orig; + 
int cnt; + int free_cnt; + + /* Original, current and free SBS for child VFs */ + int iov_orig; + int iov_cnt; + int free_cnt_iov; }; static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) -- cgit v1.2.3 From 3c5da94278026a4583320f97f6547573fb3a93aa Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 2 Jun 2017 08:58:31 +0300 Subject: qed: Share additional information with qedf Share several new tidbits with qedf: - wwpn & wwnn - Absolute pf-id [this one is actually meant for qedi as well] - Number of available CQs While we're at it, now that qedf will be aware of the available CQs we can add some validation on the inputs it provides. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 8 +++++++- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 14 ++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 2 ++ include/linux/qed/qed_fcoe_if.h | 5 +++++ include/linux/qed/qed_if.h | 2 ++ 5 files changed, 30 insertions(+), 1 deletion(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 7649f35000db..2d88d4883483 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -2071,16 +2071,22 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) QED_VF_L2_QUE)); } + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, + RESC_NUM(p_hwfn, + QED_CMDQS_CQS)); + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, - "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d ISCSI_CQ=%d #SBS=%d\n", + "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n", (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), + (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ), (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), (int)sb_cnt.cnt); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 3fc4ff22960e..df195c02b711 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -141,6 +141,15 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, p_data = &p_ramrod->init_ramrod_data; fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params; + /* Sanity */ + if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) { + DP_ERR(p_hwfn, + "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. 
Aborting function start\n", + fcoe_pf_params->num_cqs, + p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); + return -EINVAL; + } + p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages); p_data->sq_num_pages_in_pbl = tmp; @@ -739,6 +748,11 @@ static int qed_fill_fcoe_dev_info(struct qed_dev *cdev, info->secondary_bdq_rq_addr = qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + info->wwpn = hwfn->mcp_info->func_info.wwn_port; + info->wwnn = hwfn->mcp_info->func_info.wwn_node; + + info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ); + return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index ac3bdcd9f0b6..baebd5926895 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -269,6 +269,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support == QED_WOL_SUPPORT_PME) dev_info->wol_support = true; + + dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; } else { qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, &dev_info->fw_minor, &dev_info->fw_rev, diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index bd6bcb809415..1e015c50e6b8 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -24,6 +24,11 @@ struct qed_dev_fcoe_info { void __iomem *primary_dbq_rq_addr; void __iomem *secondary_bdq_rq_addr; + + u64 wwpn; + u64 wwnn; + + u8 num_cqs; }; struct qed_fcoe_params_offload { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 607e1c5e185a..e29c6f74a4d4 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -360,6 +360,8 @@ struct qed_dev_info { bool vxlan_enable; bool gre_enable; bool geneve_enable; + + u8 abs_pf_id; }; enum qed_sb_type { -- cgit v1.2.3 From 20675b37ee76d11430fd3d4da0851fc6a4e36abc Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 2 Jun 2017 08:58:32 +0300 Subject: qed: Support NVM-image reading API Storage drivers require images from the nvram in boot-from-SAN scenarios. This provides the necessary API between qed and the protocol drivers to perform such reads. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 16 ++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 89 ++++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 21 +++++++ include/linux/qed/qed_if.h | 18 ++++++ 4 files changed, 144 insertions(+) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index baebd5926895..6ac10ce14e5b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1535,6 +1535,21 @@ static int qed_drain(struct qed_dev *cdev) return 0; } +static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, + u8 *buf, u16 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + int rc; + + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_get_nvm_image(hwfn, ptt, type, buf, len); + qed_ptt_release(hwfn, ptt); + return rc; +} + static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) { *rx_coal = cdev->rx_coalesce_usecs; @@ -1712,6 +1727,7 @@ const struct qed_common_ops qed_common_ops_pass = { .dbg_all_data_size = &qed_dbg_all_data_size, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .nvm_get_image = &qed_nvm_get_image, .get_coalesce = &qed_get_coalesce, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index e82f32950361..9da91045d167 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2310,6 +2310,95 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn, return rc; } +static int +qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + struct qed_nvm_image_att *p_image_att) +{ + struct bist_nvm_image_att mfw_image_att; + enum nvm_image_type type; + u32 num_images, i; + int rc; + + /* Translate image_id into MFW definitions */ + switch (image_id) { + case QED_NVM_IMAGE_ISCSI_CFG: + type = NVM_TYPE_ISCSI_CFG; + break; + case QED_NVM_IMAGE_FCOE_CFG: + type = NVM_TYPE_FCOE_CFG; + break; + default: + DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", + image_id); + return -EINVAL; + } + + /* Learn number of images, then traverse and see if one fits */ + rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images); + if (rc || !num_images) + return -EINVAL; + + for (i = 0; i < num_images; i++) { + rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt, + &mfw_image_att, i); + if (rc) + return rc; + + if (type == mfw_image_att.image_type) + break; + } + if (i == num_images) { + DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, + "Failed to find nvram image of type %08x\n", + image_id); + return -EINVAL; + } + + p_image_att->start_addr = mfw_image_att.nvm_start_addr; + p_image_att->length = mfw_image_att.len; + + return 0; +} + +int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + u8 *p_buffer, u32 buffer_len) +{ + struct qed_nvm_image_att image_att; + int rc; + + memset(p_buffer, 0, buffer_len); + + rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att); + if (rc) + return rc; + + /* Validate sizes - both the image's and the supplied buffer's */ + if (image_att.length <= 4) { + DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, + "Image [%d] is too small - only %d bytes\n", + image_id, image_att.length); + return -EINVAL; + } + + /* Each NVM image 
is suffixed by CRC; Upper-layer has no need for it */ + image_att.length -= 4; + + if (image_att.length > buffer_len) { + DP_VERBOSE(p_hwfn, + QED_MSG_STORAGE, + "Image [%d] is too big - %08x bytes where only %08x are available\n", + image_id, image_att.length, buffer_len); + return -ENOMEM; + } + + return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr, + p_buffer, image_att.length); +} + static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) { enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 40247593e772..af03b3651411 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -430,6 +430,27 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); +struct qed_nvm_image_att { + u32 start_addr; + u32 length; +}; + +/** + * @brief Allows reading a whole nvram image + * + * @param p_hwfn + * @param p_ptt + * @param image_id - image requested for reading + * @param p_buffer - allocated buffer into which to fill data + * @param buffer_len - length of the allocated buffer. + * + * @return 0 iff p_buffer now contains the nvram image. + */ +int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_nvm_images image_id, + u8 *p_buffer, u32 buffer_len); + /** * @brief Bist register test * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index e29c6f74a4d4..567ea3ea6c0e 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -156,6 +156,11 @@ struct qed_dcbx_get { struct qed_dcbx_admin_params local; }; +enum qed_nvm_images { + QED_NVM_IMAGE_ISCSI_CFG, + QED_NVM_IMAGE_FCOE_CFG, +}; + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, @@ -630,6 +635,19 @@ struct qed_common_ops { void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); +/** + * @brief nvm_get_image - reads an entire image from nvram + * + * @param cdev + * @param type - type of the request nvram image + * @param buf - preallocated buffer to fill with the image + * @param len - length of the allocated buffer + * + * @return 0 on success, error otherwise + */ + int (*nvm_get_image)(struct qed_dev *cdev, + enum qed_nvm_images type, u8 *buf, u16 len); + /** * @brief get_coalesce - Get coalesce parameters in usec * -- cgit v1.2.3 From dc4528e9e890f82900d75ac6276aba8ce89a80b6 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 2 Jun 2017 08:58:33 +0300 Subject: qed: Add support for changing iSCSI mac Enhance API between qedi and qed, allowing qedi to inform device's firmware when the iSCSI mac is to be changed. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 66 +++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 1 + include/linux/qed/qed_iscsi_if.h | 7 +++ 3 files changed, 74 insertions(+) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index bc8ce09d390f..6103723d7118 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -488,6 +488,54 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +static int +qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_mac_update *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + u8 ucval; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_MAC_UPDATE, + PROTOCOLID_ISCSI, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update; + p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE; + SET_FIELD(p_ramrod->hdr.flags, + ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->fw_cid = cpu_to_le32(p_conn->icid); + ucval = p_conn->remote_mac[1]; + ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval; + ucval = p_conn->remote_mac[0]; + ((u8 *)(&p_ramrod->remote_mac_addr_hi))[1] = ucval; + ucval = p_conn->remote_mac[3]; + ((u8 *)(&p_ramrod->remote_mac_addr_mid))[0] = ucval; + ucval = p_conn->remote_mac[2]; + ((u8 *)(&p_ramrod->remote_mac_addr_mid))[1] = ucval; + ucval = p_conn->remote_mac[5]; + ((u8 *)(&p_ramrod->remote_mac_addr_lo))[0] = ucval; + ucval = p_conn->remote_mac[4]; + ((u8 *)(&p_ramrod->remote_mac_addr_lo))[1] = ucval; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn *p_conn, enum spq_mode comp_mode, @@ -1324,6 +1372,23 @@ static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) return qed_iscsi_get_stats(QED_LEADING_HWFN(cdev), stats); } +static int qed_iscsi_change_mac(struct qed_dev *cdev, + u32 handle, const u8 *mac) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + return qed_sp_iscsi_mac_update(QED_LEADING_HWFN(cdev), + hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats) { @@ -1358,6 +1423,7 @@ static const struct qed_iscsi_ops qed_iscsi_ops_pass = { .destroy_conn = &qed_iscsi_destroy_conn, .clear_sq = &qed_iscsi_clear_conn_sq, .get_stats = &qed_iscsi_stats, + .change_mac = &qed_iscsi_change_mac, }; const struct qed_iscsi_ops *qed_get_iscsi_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index b9464f3ab0e2..00dd50f8c42f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -120,6 +120,7 @@ union ramrod_data { struct iscsi_spe_func_dstry iscsi_destroy; 
struct iscsi_spe_conn_offload iscsi_conn_offload;
 struct iscsi_conn_update_ramrod_params iscsi_conn_update;
+ struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
 struct iscsi_spe_conn_termination iscsi_conn_terminate;
 struct vf_start_ramrod_data vf_start;
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index 3414649133d2..111e606a74c8 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -210,6 +210,11 @@ struct qed_iscsi_cb_ops {
 * @param stats - pointer to struct that would be filled with stats
 * @return 0 on success, error otherwise.
+ * @change_mac Change MAC of interface
+ * @param cdev
+ * @param handle - the connection handle.
+ * @param mac - new MAC to configure.
+ * @return 0 on success, otherwise error value.
 */
struct qed_iscsi_ops {
 const struct qed_common_ops *common;
@@ -248,6 +253,8 @@ struct qed_iscsi_ops {
 int (*get_stats)(struct qed_dev *cdev,
 struct qed_iscsi_stats *stats);
+
+ int (*change_mac)(struct qed_dev *cdev, u32 handle, const u8 *mac);
};

const struct qed_iscsi_ops *qed_get_iscsi_ops(void);
-- cgit v1.2.3


From f604b17d7fdef574792a7e0b39f1b926d6b43d9d Mon Sep 17 00:00:00 2001
From: "Mintz, Yuval"
Date: Sun, 4 Jun 2017 13:31:01 +0300
Subject: qed*: L2 interface to use the SB structures directly

Part of an effort toward a cleaner separation between qed and the
protocol drivers, the L2 interface is to use the SB structure for
initialization purposes opaquely.

Signed-off-by: Yuval Mintz
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/qlogic/qed/qed_l2.c | 32 +++++++++++++++-------------
 drivers/net/ethernet/qlogic/qed/qed_l2.h | 24 +++++++++++++++------
 drivers/net/ethernet/qlogic/qed/qed_sriov.c | 13 +++++++++--
 drivers/net/ethernet/qlogic/qed/qed_vf.c | 8 +++----
 drivers/net/ethernet/qlogic/qede/qede_main.c | 4 ++--
 include/linux/qed/qed_eth_if.h | 3 +--
 6 files changed, 52 insertions(+), 32 deletions(-)

(limited to 'include/linux/qed')

diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 9d5791155fcf..262b2ba13e79 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -182,9 +182,15 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
 p_cid->opaque_fid = opaque_fid;
 p_cid->cid = cid;
 p_cid->vf_qid = vf_qid;
- p_cid->rel = *p_params;
 p_cid->p_owner = p_hwfn;

+ /* Fill in parameters */
+ p_cid->rel.vport_id = p_params->vport_id;
+ p_cid->rel.queue_id = p_params->queue_id;
+ p_cid->rel.stats_id = p_params->stats_id;
+ p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+ p_cid->sb_idx = p_params->sb_idx;
+
 /* Don't try calculating the absolute indices for VFs */
 if (IS_VF(p_hwfn->cdev)) {
 p_cid->abs = p_cid->rel;
@@ -215,10 +221,6 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
 p_cid->abs.stats_id = p_cid->rel.stats_id;
 }

- /* SBs relevant information was already provided as absolute */
- p_cid->abs.sb = p_cid->rel.sb;
- p_cid->abs.sb_idx = p_cid->rel.sb_idx;
-
 /* This is tricky - we're actually interested in whether this is a PF
 * entry meant for the VF.
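 * (Illustrative gloss, not part of the original patch: this is the case
 * where the PF creates a queue-cid on behalf of one of its VFs, in which
 * case fields such as the stats-id are already absolute.)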
*/ @@ -235,7 +237,7 @@ out: p_cid->rel.queue_id, p_cid->abs.queue_id, p_cid->rel.stats_id, - p_cid->abs.stats_id, p_cid->abs.sb, p_cid->abs.sb_idx); + p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx); return p_cid; @@ -767,7 +769,7 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", p_cid->opaque_fid, p_cid->cid, - p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->abs.sb); + p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -783,8 +785,8 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.rx_queue_start; - p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); - p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; p_ramrod->vport_id = p_cid->abs.vport_id; p_ramrod->stats_counter_id = p_cid->abs.stats_id; p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); @@ -1001,8 +1003,8 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.tx_queue_start; p_ramrod->vport_id = p_cid->abs.vport_id; - p_ramrod->sb_id = cpu_to_le16(p_cid->abs.sb); - p_ramrod->sb_index = p_cid->abs.sb_idx; + p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; p_ramrod->stats_counter_id = p_cid->abs.stats_id; p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id); @@ -2279,9 +2281,9 @@ static int qed_start_rxq(struct qed_dev *cdev, } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n", + "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", p_params->queue_id, rss_num, p_params->vport_id, - p_params->sb); + p_params->p_sb->igu_sb_id); return 0; } @@ -2329,9 +2331,9 @@ static int qed_start_txq(struct qed_dev *cdev, } DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), - "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n", + "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", p_params->queue_id, rss_num, p_params->vport_id, - p_params->sb); + p_params->p_sb->igu_sb_id); return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8606bbfa6612..6ad36449dae9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -279,14 +279,24 @@ void qed_reset_vport_stats(struct qed_dev *cdev); #define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) +/* Almost identical to the qed_queue_start_common_params, + * but here we maintain the SB index in IGU CAM. + */ +struct qed_queue_cid_params { + u8 vport_id; + u16 queue_id; + u8 stats_id; +}; + struct qed_queue_cid { - /* 'Relative' is a relative term ;-). Usually the indices [not counting - * SBs] would be PF-relative, but there are some cases where that isn't - * the case - specifically for a PF configuring its VF indices it's - * possible some fields [E.g., stats-id] in 'rel' would already be abs. 
- */ - struct qed_queue_start_common_params rel; - struct qed_queue_start_common_params abs; + /* For stats-id, the `rel' is actually absolute as well */ + struct qed_queue_cid_params rel; + struct qed_queue_cid_params abs; + + /* These have no 'relative' meaning */ + u16 sb_igu_id; + u8 sb_idx; + u32 cid; u16 opaque_fid; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 5ae8827534f8..498c83ebc385 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1951,6 +1951,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, u8 status = PFVF_STATUS_NO_RESOURCE; struct qed_vf_q_info *p_queue; struct vfpf_start_rxq_tlv *req; + struct qed_sb_info sb_dummy; bool b_legacy_vf = false; int rc; @@ -1968,7 +1969,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, params.queue_id = p_queue->fw_rx_qid; params.vport_id = vf->vport_id; params.stats_id = vf->abs_vf_id + 0x10; - params.sb = req->hw_sb; + /* Since IGU index is passed via sb_info, construct a dummy one */ + memset(&sb_dummy, 0, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; params.sb_idx = req->sb_index; p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn, @@ -2273,6 +2277,7 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, u8 status = PFVF_STATUS_NO_RESOURCE; struct vfpf_start_txq_tlv *req; struct qed_vf_q_info *p_queue; + struct qed_sb_info sb_dummy; int rc; u16 pq; @@ -2290,7 +2295,11 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, params.queue_id = p_queue->fw_tx_qid; params.vport_id = vf->vport_id; params.stats_id = vf->abs_vf_id + 0x10; - params.sb = req->hw_sb; + + /* Since IGU index is passed via sb_info, construct a dummy one */ + memset(&sb_dummy, 0, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; params.sb_idx = req->sb_index; p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 29d74074238f..877d41e456e4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -588,8 +588,8 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, req->cqe_pbl_addr = cqe_pbl_addr; req->cqe_pbl_size = cqe_pbl_size; req->rxq_addr = bd_chain_phys_addr; - req->hw_sb = p_cid->rel.sb; - req->sb_index = p_cid->rel.sb_idx; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; req->bd_max_bytes = bd_max_bytes; req->stat_id = -1; @@ -697,8 +697,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, /* Tx */ req->pbl_addr = pbl_addr; req->pbl_size = pbl_size; - req->hw_sb = p_cid->rel.sb; - req->sb_index = p_cid->rel.sb_idx; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 00c70625f8a4..ad1e24962bdb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1770,7 +1770,7 @@ static int qede_start_txq(struct qede_dev *edev, else params.queue_id = txq->index; - params.sb = fp->sb_info->igu_sb_id; + params.p_sb = fp->sb_info; params.sb_idx = sb_idx; rc = edev->ops->q_tx_start(edev->cdev, rss_id, ¶ms, phys_table, @@ -1849,7 +1849,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) memset(&q_params, 0, sizeof(q_params)); 
q_params.queue_id = rxq->rxq_id; q_params.vport_id = 0; - q_params.sb = fp->sb_info->igu_sb_id; + q_params.p_sb = fp->sb_info; q_params.sb_idx = RX_PI; p_phys_table = diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d66d16a559e1..fd72056f8d49 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -47,8 +47,7 @@ struct qed_queue_start_common_params { /* Relative, but relevant only for PFs */ u8 stats_id; - /* These are always absolute */ - u16 sb; + struct qed_sb_info *p_sb; u8 sb_idx; }; -- cgit v1.2.3 From 08bc8f15e69cbd9f8e3d7bbba4814cec50d51cfe Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 4 Jun 2017 13:31:06 +0300 Subject: qed: Multiple qzone queues for VFs This adds the infrastructure for supporting VFs that want to open multiple transmission queues on the same queue-zone. At this point, there are no VFs that actually request this functionality, but later patches would remedy that.
a. VF and PF would communicate the capability during ACQUIRE; Legacy VFs would continue on behaving as they do today
b. PF would communicate number of supported CIDs to the VF and would enforce said limitation
c. Whenever VF passes a request for a given queue configuration it would also pass an associated index within said queue-zone
Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 8 +- drivers/net/ethernet/qlogic/qed/qed_l2.c | 30 ++++-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 136 ++++++++++++++++++++++++---- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 1 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 39 +++++++- drivers/net/ethernet/qlogic/qed/qed_vf.h | 32 ++++++- include/linux/qed/qed_if.h | 4 + 7 files changed, 215 insertions(+), 35 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 25d5b91f7928..e201214764db 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2116,8 +2116,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params; - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons, 1); + if (!p_params->num_vf_cons) + p_params->num_vf_cons = + ETH_PF_PARAMS_VF_CONS_DEFAULT; + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, + p_params->num_vf_cons); p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; break; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 75643c322642..cffa8e7e539b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -198,10 +198,10 @@ static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn, void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) { - /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */ - if ((p_cid->vfid == QED_QUEUE_CID_SELF) && - IS_PF(p_hwfn->cdev)) - qed_cxt_release_cid(p_hwfn, p_cid->cid); + bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID); + + if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) + _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); /* For PF's VFs we maintain the index inside queue-zone in IOV */ if (p_cid->vfid == QED_QUEUE_CID_SELF) @@ -319,18 +319,30 @@ qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, struct qed_queue_cid_vf_params *p_vf_params) { struct
qed_queue_cid *p_cid; + u8 vfid = QED_CXT_PF_CID; bool b_legacy_vf = false; u32 cid = 0; - /* Currently, PF doesn't need to allocate CIDs for any VF */ - if (p_vf_params) - b_legacy_vf = true; + /* In case of legacy VFs, The CID can be derived from the additional + * VF parameters - the VF assumes queue X uses CID X, so we can simply + * use the vf_qid for this purpose as well. + */ + if (p_vf_params) { + vfid = p_vf_params->vfid; + + if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) { + b_legacy_vf = true; + cid = p_vf_params->vf_qid; + } + } + /* Get a unique firmware CID for this queue, in case it's a PF. * VF's don't need a CID as the queue configuration will be done * by PF. */ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) { - if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) { + if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &cid, vfid)) { DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); return NULL; } @@ -339,7 +351,7 @@ qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, p_params, b_is_rx, p_vf_params); if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf) - qed_cxt_release_cid(p_hwfn, cid); + _qed_cxt_release_cid(p_hwfn, cid, vfid); return p_cid; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index e6fb5684b8fd..c620a5fa250b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -47,12 +47,16 @@ static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) { - u8 legacy = QED_QCID_LEGACY_VF_CID; + u8 legacy = 0; if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN) legacy |= QED_QCID_LEGACY_VF_RX_PROD; + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + legacy |= QED_QCID_LEGACY_VF_CID; + return legacy; } @@ -1413,6 +1417,10 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, p_req->num_vlan_filters); + p_resp->num_cids = + min_t(u8, p_req->num_cids, + p_hwfn->pf_params.eth_pf_params.num_vf_cons); + /* This isn't really needed/enforced, but some legacy VFs might depend * on the correct filling of this field. */ @@ -1424,10 +1432,11 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_sbs < p_req->num_sbs || p_resp->num_mac_filters < p_req->num_mac_filters || p_resp->num_vlan_filters < p_req->num_vlan_filters || - p_resp->num_mc_filters < p_req->num_mc_filters) { + p_resp->num_mc_filters < p_req->num_mc_filters || + p_resp->num_cids < p_req->num_cids) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n", + "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n", p_vf->abs_vf_id, p_req->num_rxqs, p_resp->num_rxqs, @@ -1439,7 +1448,9 @@ static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, p_resp->num_mac_filters, p_req->num_vlan_filters, p_resp->num_vlan_filters, - p_req->num_mc_filters, p_resp->num_mc_filters); + p_req->num_mc_filters, + p_resp->num_mc_filters, + p_req->num_cids, p_resp->num_cids); /* Some legacy OSes are incapable of correctly handling this * failure. 
@@ -1555,6 +1566,12 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->num_hwfns > 1) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; + /* Share our ability to use multiple queue-ids only with VFs + * that request it. + */ + if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; + qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); @@ -1977,10 +1994,37 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, bool b_is_tx) { - if (b_is_tx) - return QED_IOV_LEGACY_QID_TX; - else - return QED_IOV_LEGACY_QID_RX; + struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Search for the qid if the VF published its going to provide it */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { + if (b_is_tx) + return QED_IOV_LEGACY_QID_TX; + else + return QED_IOV_LEGACY_QID_RX; + } + + p_qid_tlv = (struct vfpf_qid_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + CHANNEL_TLV_QID); + if (!p_qid_tlv) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%2x]: Failed to provide qid\n", + p_vf->relative_vf_id); + + return QED_IOV_QID_INVALID; + } + + if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Provided qid out-of-bounds %02x\n", + p_vf->relative_vf_id, p_qid_tlv->qid); + return QED_IOV_QID_INVALID; + } + + return p_qid_tlv->qid; } static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, @@ -2006,7 +2050,12 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, goto out; qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + p_queue = &vf->vf_queues[req->rx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; vf_legacy = qed_vf_calculate_legacy(vf); @@ -2332,12 +2381,17 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, req = &mbx->req_virt->start_txq; if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, - QED_IOV_VALIDATE_Q_DISABLE) || + QED_IOV_VALIDATE_Q_NA) || !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) goto out; qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + p_queue = &vf->vf_queues[req->tx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; vf_legacy = qed_vf_calculate_legacy(vf); @@ -2388,17 +2442,33 @@ static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, struct qed_vf_queue *p_queue; int rc = 0; - if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, - QED_IOV_VALIDATE_Q_ENABLE)) { + if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d] Tried Closing Rx 0x%04x which is inactive\n", - vf->relative_vf_id, rxq_id); + "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", + vf->relative_vf_id, rxq_id, qid_usage_idx); return -EINVAL; } p_queue = &vf->vf_queues[rxq_id]; + /* We've validated the index and the existence of the active RXQ - + * now we need to make sure that it's using the correct qid. 
+ */ + if (!p_queue->cids[qid_usage_idx].p_cid || + p_queue->cids[qid_usage_idx].b_is_tx) { + struct qed_queue_cid *p_cid; + + p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", + vf->relative_vf_id, + rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); + return -EINVAL; + } + + /* Now that we know we have a valid Rx-queue - close it */ rc = qed_eth_rx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid, false, cqe_completion); @@ -2418,11 +2488,13 @@ static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, struct qed_vf_queue *p_queue; int rc = 0; - if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, - QED_IOV_VALIDATE_Q_ENABLE)) + if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA)) return -EINVAL; p_queue = &vf->vf_queues[txq_id]; + if (!p_queue->cids[qid_usage_idx].p_cid || + !p_queue->cids[qid_usage_idx].b_is_tx) + return -EINVAL; rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid); if (rc) @@ -2458,6 +2530,8 @@ static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, /* Find which qid-index is associated with the queue */ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, qid_usage_idx, req->cqe_completion); @@ -2494,6 +2568,8 @@ static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, /* Find which qid-index is associated with the queue */ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx); if (!rc) @@ -2524,15 +2600,35 @@ static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; - /* Validate inputs */ - for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) + /* There shouldn't exist a VF that uses queue-qids yet uses this + * API with multiple Rx queues. Validate this. + */ + if ((vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] supports QIDs but sends multiple queues\n", + vf->relative_vf_id); + goto out; + } + + /* Validate inputs - for the legacy case this is still true since + * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. 
+ */ + for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { if (!qed_iov_validate_rxq(p_hwfn, vf, i, - QED_IOV_VALIDATE_Q_ENABLE)) { - DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", - vf->relative_vf_id, req->rx_qid, req->num_rxqs); + QED_IOV_VALIDATE_Q_NA) || + !vf->vf_queues[i].cids[qid_usage_idx].p_cid || + vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", + vf->relative_vf_id, req->rx_qid, + req->num_rxqs); goto out; } + } /* Prepare the handlers */ for (i = 0; i < req->num_rxqs; i++) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 480cd99c69b5..95f55ae2ee8b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -151,6 +151,7 @@ struct qed_iov_vf_mbx { #define QED_IOV_LEGACY_QID_RX (0) #define QED_IOV_LEGACY_QID_TX (1) +#define QED_IOV_QID_INVALID (0xFE) struct qed_vf_queue_cid { bool b_is_tx; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 877d41e456e4..c0d2febad86a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -153,6 +153,22 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) return rc; } +static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Only add QIDs for the queue if it was negotiated with PF */ + if (!(p_iov->acquire_resp.pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_QID, sizeof(*p_qid_tlv)); + p_qid_tlv->qid = p_cid->qid_usage_idx; +} + #define VF_ACQUIRE_THRESH 3 static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, struct vf_pf_resc_request *p_req, @@ -160,7 +176,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n", + "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. 
Try PF recommended amount\n", p_req->num_rxqs, p_resp->num_rxqs, p_req->num_rxqs, @@ -171,7 +187,8 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, p_resp->num_mac_filters, p_req->num_vlan_filters, p_resp->num_vlan_filters, - p_req->num_mc_filters, p_resp->num_mc_filters); + p_req->num_mc_filters, + p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids); /* humble our request */ p_req->num_txqs = p_resp->num_txqs; @@ -180,6 +197,7 @@ static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, p_req->num_mac_filters = p_resp->num_mac_filters; p_req->num_vlan_filters = p_resp->num_vlan_filters; p_req->num_mc_filters = p_resp->num_mc_filters; + p_req->num_cids = p_resp->num_cids; } static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) @@ -204,6 +222,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF; p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS; req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; req->vfdev_info.fw_major = FW_MAJOR_VERSION; @@ -307,6 +326,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) p_iov->b_pre_fp_hsi = true; + /* In case PF doesn't support multi-queue Tx, update the number of + * CIDs to reflect the number of queues [older PFs didn't fill that + * field]. + */ + if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs; + /* Update bulletin board size with response from PF */ p_iov->bulletin.size = resp->bulletin_size; @@ -609,6 +635,9 @@ qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); } + + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -657,6 +686,8 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, req->num_rxqs = 1; req->cqe_completion = cqe_completion; + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -700,6 +731,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, req->hw_sb = p_cid->sb_igu_id; req->sb_index = p_cid->sb_idx; + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); @@ -749,6 +782,8 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) req->tx_qid = p_cid->rel.queue_id; req->num_txqs = 1; + qed_vf_pf_add_qid(p_hwfn, p_cid); + /* add list termination tlv */ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index d7b9c90b2f60..9588ae779267 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -46,7 +46,8 @@ struct vf_pf_resc_request { u8 num_mac_filters; u8 num_vlan_filters; u8 num_mc_filters; - u16 padding; + u8 num_cids; + u8 padding; }; struct hw_sb_info { @@ -113,6 +114,11 @@ struct vfpf_acquire_tlv { struct vf_pf_vfdev_info { #define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */ #define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ + /* A requirement for 
supporting multi-Tx queues on a single queue-zone, + * VF would pass qids as additional information whenever passing queue + * references. + */ +#define VFPF_ACQUIRE_CAP_QUEUE_QIDS BIT(2) u64 capabilities; u8 fw_major; u8 fw_minor; @@ -185,6 +191,9 @@ struct pfvf_acquire_resp_tlv { */ #define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2) + /* PF expects queues to be received with additional qids */ +#define PFVF_ACQUIRE_CAP_QUEUE_QIDS BIT(3) + u16 db_size; u8 indices_per_sb; u8 os_type; @@ -221,7 +230,8 @@ struct pfvf_acquire_resp_tlv { u8 num_mac_filters; u8 num_vlan_filters; u8 num_mc_filters; - u8 padding[2]; + u8 num_cids; + u8 padding; } resc; u32 bulletin_size; @@ -234,6 +244,16 @@ struct pfvf_start_queue_resp_tlv { u8 padding[4]; }; +/* Extended queue information - additional index for reference inside qzone. + * If commmunicated between VF/PF, each TLV relating to queues should be + * extended by one such [or have a future base TLV that already contains info]. + */ +struct vfpf_qid_tlv { + struct channel_tlv tl; + u8 qid; + u8 padding[3]; +}; + /* Setup Queue */ struct vfpf_start_rxq_tlv { struct vfpf_first_tlv first_tlv; @@ -597,6 +617,8 @@ enum { CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_UPDATE_TUNN_PARAM, + CHANNEL_TLV_RESERVED, + CHANNEL_TLV_QID, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. @@ -605,6 +627,12 @@ enum { CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1, }; +/* Default number of CIDs [total of both Rx and Tx] to be requested + * by default, and maximum possible number. + */ +#define QED_ETH_VF_DEFAULT_NUM_CIDS (32) +#define QED_ETH_VF_MAX_NUM_CIDS (250) + /* This data is held in the qed_hwfn structure for VFs only. */ struct qed_vf_iov { union vfpf_tlvs *vf2pf_request; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 567ea3ea6c0e..74f6b99754aa 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -185,6 +185,10 @@ struct qed_eth_pf_params { */ u16 num_cons; + /* per-VF number of CIDs */ + u8 num_vf_cons; +#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32) + /* To enable arfs, previous to HW-init a positive number needs to be * set [as filters require allocated searcher ILT memory]. * This will set the maximal number of configured steering-filters. -- cgit v1.2.3 From cbb8a12c089c7f04b86d08d89bdab71ec9bff1f5 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Sun, 4 Jun 2017 13:31:08 +0300 Subject: qed: VF XDP support The final addition on the qed front -
- VFs would now require their PFs to provide multiple CIDs
- Based on the availability of connections from PF, determine whether XDP is feasible and share it with qede via dev_info.
Signed-off-by: Yuval Mintz Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 19 +++++++++++++++---- drivers/net/ethernet/qlogic/qed/qed_vf.c | 13 ++++++++++--- drivers/net/ethernet/qlogic/qed/qed_vf.h | 12 ++++++++++++ include/linux/qed/qed_eth_if.h | 3 +++ 4 files changed, 40 insertions(+), 7 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index cffa8e7e539b..cb8b05dbfc6e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2119,15 +2119,26 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr); + + info->xdp_supported = true; } else { - qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues); - if (cdev->num_hwfns > 1) { - u8 queues = 0; + u16 total_cids = 0; + + /* Determine queues & XDP support */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u8 queues, cids; - qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues); + qed_vf_get_num_cids(p_hwfn, &cids); + qed_vf_get_num_rxqs(p_hwfn, &queues); info->num_queues += queues; + total_cids += cids; } + /* Enable VF XDP in case PF guarntees sufficient connections */ + if (total_cids >= info->num_queues * 3) + info->xdp_supported = true; + qed_vf_get_num_vlan_filters(&cdev->hwfns[0], (u8 *)&info->num_vlan_filters); qed_vf_get_num_mac_filters(&cdev->hwfns[0], diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index cb81c357bf62..1926d1ed439f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -291,9 +291,11 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; /* If we've mapped the doorbell bar, try using queue qids */ - if (p_iov->b_doorbell_bar) + if (p_iov->b_doorbell_bar) { req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR | VFPF_ACQUIRE_CAP_QUEUE_QIDS; + p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS; + } /* pf 2 vf bulletin board address */ req->bulletin_addr = p_iov->bulletin.phys; @@ -884,8 +886,8 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", - qid, *pp_doorbell, resp->offset); + "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n", + qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset); exit: qed_vf_pf_req_end(p_hwfn, rc); @@ -1478,6 +1480,11 @@ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs) *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs; } +void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids) +{ + *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids; +} + void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) { memcpy(port_mac, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index b8ce4bef8c94..b65bbc54a097 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -731,6 +731,14 @@ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); */ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs); +/** + * @brief Get number of available connections [both Rx and Tx] for VF + * + * @param p_hwfn + * @param num_cids - allocated number of connections + */ +void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids); + /** * @brief Get port mac address for VF * @@ -1010,6 +1018,10 @@ static inline void 
qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs) { } +static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids) +{ +} + static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) { } diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index fd72056f8d49..0eef0a2b1901 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -73,6 +73,9 @@ struct qed_dev_eth_info { /* Legacy VF - this affects the datapath, so qede has to know */ bool is_legacy; + + /* Might depend on available resources [in case of VF] */ + bool xdp_supported; }; struct qed_update_vport_rss_params { -- cgit v1.2.3 From 7c7973b2ae277c6e89dceda2246fff2472c8ffdb Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 9 Jun 2017 17:13:18 +0300 Subject: qed: LL2 to use packed information for tx First step in revising the LL2 interface, this declares qed_ll2_tx_pkt_info as part of the ll2 interface, and uses it for transmission instead of receiving lots of parameters. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 127 +++++++++++++---------------- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 41 ++-------- drivers/net/ethernet/qlogic/qed/qed_roce.c | 18 ++-- include/linux/qed/qed_ll2_if.h | 27 ++++++ 4 files changed, 102 insertions(+), 111 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index f67ed6d39dfd..0b75f5dd25f8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -740,12 +740,13 @@ static void qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { + struct qed_ll2_tx_pkt_info tx_pkt; struct qed_ooo_buffer *p_buffer; - int rc; u16 l4_hdr_offset_w; dma_addr_t first_frag; u16 parse_flags; u8 bd_flags; + int rc; /* Submit Tx buffers here */ while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, @@ -760,13 +761,18 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); - rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1, - p_buffer->vlan, bd_flags, - l4_hdr_offset_w, - p_ll2_conn->conn.tx_dest, 0, - first_frag, - p_buffer->packet_length, - p_buffer, true); + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.vlan = p_buffer->vlan; + tx_pkt.bd_flags = bd_flags; + tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; + tx_pkt.tx_dest = p_ll2_conn->conn.tx_dest; + tx_pkt.first_frag = first_frag; + tx_pkt.first_frag_len = p_buffer->packet_length; + tx_pkt.cookie = p_buffer; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, + &tx_pkt, true); if (rc) { qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buffer, false); @@ -1593,20 +1599,18 @@ out: static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, struct qed_ll2_tx_queue *p_tx, struct qed_ll2_tx_packet *p_curp, - u8 num_of_bds, - dma_addr_t first_frag, - u16 first_frag_len, void *p_cookie, + struct qed_ll2_tx_pkt_info *pkt, u8 notify_fw) { list_del(&p_curp->list_entry); - p_curp->cookie = p_cookie; - p_curp->bd_used = num_of_bds; + p_curp->cookie = pkt->cookie; + p_curp->bd_used = pkt->num_of_bds; p_curp->notify_fw = notify_fw; p_tx->cur_send_packet = p_curp; p_tx->cur_send_frag_num = 0; - p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag; - 
p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len; + p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag; + p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len; p_tx->cur_send_frag_num++; } @@ -1614,32 +1618,33 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2, struct qed_ll2_tx_packet *p_curp, - u8 num_of_bds, - enum core_tx_dest tx_dest, - u16 vlan, - u8 bd_flags, - u16 l4_hdr_offset_w, - enum core_roce_flavor_type roce_flavor, - dma_addr_t first_frag, - u16 first_frag_len) + struct qed_ll2_tx_pkt_info *pkt) { struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); struct core_tx_bd *start_bd = NULL; + enum core_roce_flavor_type roce_flavor; + enum core_tx_dest tx_dest; u16 bd_data = 0, frag_idx; + roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE + : CORE_RROCE; + + tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW + : CORE_TX_DEST_LB; + start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); - start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan); + start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, - cpu_to_le16(l4_hdr_offset_w)); + cpu_to_le16(pkt->l4_hdr_offset_w)); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); - bd_data |= bd_flags; + bd_data |= pkt->bd_flags; SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); - SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds); + SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); - DMA_REGPAIR_LE(start_bd->addr, first_frag); - start_bd->nbytes = cpu_to_le16(first_frag_len); + DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); + start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); DP_VERBOSE(p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), @@ -1648,17 +1653,17 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, p_ll2->cid, p_ll2->conn.conn_type, prod_idx, - first_frag_len, - num_of_bds, + pkt->first_frag_len, + pkt->num_of_bds, le32_to_cpu(start_bd->addr.hi), le32_to_cpu(start_bd->addr.lo)); - if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds) + if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds) return; /* Need to provide the packet with additional BDs for frags */ for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; - frag_idx < num_of_bds; frag_idx++) { + frag_idx < pkt->num_of_bds; frag_idx++) { struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); @@ -1726,21 +1731,13 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, u8 connection_handle, - u8 num_of_bds, - u16 vlan, - u8 bd_flags, - u16 l4_hdr_offset_w, - enum qed_ll2_tx_dest e_tx_dest, - enum qed_ll2_roce_flavor_type qed_roce_flavor, - dma_addr_t first_frag, - u16 first_frag_len, void *cookie, u8 notify_fw) + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw) { struct qed_ll2_tx_packet *p_curp = NULL; struct qed_ll2_info *p_ll2_conn = NULL; - enum core_roce_flavor_type roce_flavor; struct qed_ll2_tx_queue *p_tx; struct qed_chain *p_tx_chain; - enum core_tx_dest tx_dest; unsigned long flags; int rc = 0; @@ -1750,7 +1747,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, p_tx = &p_ll2_conn->tx_queue; p_tx_chain = &p_tx->txq_chain; - if (num_of_bds > 
CORE_LL2_TX_MAX_BDS_PER_PACKET) + if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET) return -EIO; spin_lock_irqsave(&p_tx->lock, flags); @@ -1763,7 +1760,7 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, if (!list_empty(&p_tx->free_descq)) p_curp = list_first_entry(&p_tx->free_descq, struct qed_ll2_tx_packet, list_entry); - if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds) + if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds) p_curp = NULL; if (!p_curp) { @@ -1771,26 +1768,10 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, goto out; } - tx_dest = e_tx_dest == QED_LL2_TX_DEST_NW ? CORE_TX_DEST_NW : - CORE_TX_DEST_LB; - if (qed_roce_flavor == QED_LL2_ROCE) { - roce_flavor = CORE_ROCE; - } else if (qed_roce_flavor == QED_LL2_RROCE) { - roce_flavor = CORE_RROCE; - } else { - rc = -EINVAL; - goto out; - } - /* Prepare packet and BD, and perhaps send a doorbell to FW */ - qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, - num_of_bds, first_frag, - first_frag_len, cookie, notify_fw); - qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, - num_of_bds, tx_dest, - vlan, bd_flags, l4_hdr_offset_w, - roce_flavor, - first_frag, first_frag_len); + qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw); + + qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt); qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); @@ -2245,6 +2226,7 @@ fail: static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) { + struct qed_ll2_tx_pkt_info pkt; const skb_frag_t *frag; int rc = -EINVAL, i; dma_addr_t mapping; @@ -2279,12 +2261,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT); } - rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), - cdev->ll2->handle, - 1 + skb_shinfo(skb)->nr_frags, - vlan, flags, 0, QED_LL2_TX_DEST_NW, - 0 /* RoCE FLAVOR */, - mapping, skb->len, skb, 1); + memset(&pkt, 0, sizeof(pkt)); + pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; + pkt.vlan = vlan; + pkt.bd_flags = flags; + pkt.tx_dest = QED_LL2_TX_DEST_NW; + pkt.first_frag = mapping; + pkt.first_frag_len = skb->len; + pkt.cookie = skb; + + rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, + &pkt, 1); if (rc) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 2c07d0ed971a..96af50b733e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -47,12 +47,6 @@ #define QED_MAX_NUM_OF_LL2_CONNECTIONS (4) -enum qed_ll2_roce_flavor_type { - QED_LL2_ROCE, - QED_LL2_RROCE, - MAX_QED_LL2_ROCE_FLAVOR_TYPE -}; - enum qed_ll2_conn_type { QED_LL2_TYPE_FCOE, QED_LL2_TYPE_ISCSI, @@ -64,12 +58,6 @@ enum qed_ll2_conn_type { MAX_QED_LL2_RX_CONN_TYPE }; -enum qed_ll2_tx_dest { - QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ - QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ - QED_LL2_TX_DEST_MAX -}; - struct qed_ll2_rx_packet { struct list_head list_entry; struct core_rx_bd_with_buff_len *rxq_bd; @@ -216,35 +204,16 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, * to prepare Tx packet submission to FW. 
* * @param p_hwfn - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param num_of_bds a number of requested BD equals a number of - * fragments in Tx packet - * @param vlan VLAN to insert to packet (if insertion set) - * @param bd_flags - * @param l4_hdr_offset_w L4 Header Offset from start of packet - * (in words). This is needed if both l4_csum - * and ipv6_ext are set - * @param e_tx_dest indicates if the packet is to be transmitted via - * loopback or to the network - * @param first_frag - * @param first_frag_len - * @param cookie - * - * @param notify_fw + * @param connection_handle + * @param pkt - info regarding the tx packet + * @param notify_fw - issue doorbell to fw for this packet * * @return 0 on success, failure otherwise */ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, u8 connection_handle, - u8 num_of_bds, - u16 vlan, - u8 bd_flags, - u16 l4_hdr_offset_w, - enum qed_ll2_tx_dest e_tx_dest, - enum qed_ll2_roce_flavor_type qed_roce_flavor, - dma_addr_t first_frag, - u16 first_frag_len, void *cookie, u8 notify_fw); + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw); /** * @brief qed_ll2_release_connection - releases resources diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index b9434b707b08..afc63c0e7c44 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -2937,6 +2937,7 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev, struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; enum qed_ll2_roce_flavor_type qed_roce_flavor; + struct qed_ll2_tx_pkt_info ll2_pkt; u8 flags = 0; int rc; int i; @@ -2955,11 +2956,18 @@ static int qed_roce_ll2_tx(struct qed_dev *cdev, flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); /* Tx header */ - rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle, - 1 + pkt->n_seg, 0, flags, 0, - QED_LL2_TX_DEST_NW, - qed_roce_flavor, pkt->header.baddr, - pkt->header.len, pkt, 1); + memset(&ll2_pkt, 0, sizeof(ll2_pkt)); + ll2_pkt.num_of_bds = 1 + pkt->n_seg; + ll2_pkt.bd_flags = flags; + ll2_pkt.tx_dest = QED_LL2_TX_DEST_NW; + ll2_pkt.qed_roce_flavor = qed_roce_flavor; + ll2_pkt.first_frag = pkt->header.baddr; + ll2_pkt.first_frag_len = pkt->header.len; + ll2_pkt.cookie = pkt; + + rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), + roce_ll2->handle, + &ll2_pkt, 1); if (rc) { DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc); return QED_ROCE_TX_HEAD_FAILURE; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 4fb4666ea879..78d9ed6c6b6f 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -43,6 +43,18 @@ #include #include +enum qed_ll2_roce_flavor_type { + QED_LL2_ROCE, + QED_LL2_RROCE, + MAX_QED_LL2_ROCE_FLAVOR_TYPE +}; + +enum qed_ll2_tx_dest { + QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ + QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ + QED_LL2_TX_DEST_MAX +}; + struct qed_ll2_stats { u64 gsi_invalid_hdr; u64 gsi_invalid_pkt_length; @@ -67,6 +79,21 @@ struct qed_ll2_stats { u64 sent_bcast_pkts; }; +struct qed_ll2_tx_pkt_info { + void *cookie; + dma_addr_t first_frag; + enum qed_ll2_tx_dest tx_dest; + enum qed_ll2_roce_flavor_type qed_roce_flavor; + u16 vlan; + u16 l4_hdr_offset_w; /* from start of packet */ + u16 first_frag_len; + u8 num_of_bds; + u8 bd_flags; + bool enable_ip_cksum; + bool enable_l4_cksum; + bool 
calc_ip_len; +}; + #define QED_LL2_UNUSED_HANDLE (0xff) struct qed_ll2_cb_ops { -- cgit v1.2.3 From 68be910cd2fa3f58587438af7ce3def6e03731fa Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 9 Jun 2017 17:13:19 +0300 Subject: qed: Revise ll2 Rx completion This introduces qed_ll2_comp_rx_data as a public struct and moves handling of Rx packets in LL2 into using it. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 84 ++++++++++++++++++------------- include/linux/qed/qed_ll2_if.h | 26 ++++++++++ 2 files changed, 76 insertions(+), 34 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 0b75f5dd25f8..46b71a668892 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -165,41 +165,33 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) } static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - struct qed_ll2_rx_packet *p_pkt, - struct core_rx_fast_path_cqe *p_cqe, - bool b_last_packet) + struct qed_ll2_comp_rx_data *data) { - u16 packet_length = le16_to_cpu(p_cqe->packet_length); - struct qed_ll2_buffer *buffer = p_pkt->cookie; + struct qed_ll2_buffer *buffer = data->cookie; struct qed_dev *cdev = p_hwfn->cdev; - u16 vlan = le16_to_cpu(p_cqe->vlan); - u32 opaque_data_0, opaque_data_1; - u8 pad = p_cqe->placement_offset; dma_addr_t new_phys_addr; struct sk_buff *skb; bool reuse = false; int rc = -EINVAL; u8 *new_data; - opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]); - opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]); - DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA), "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n", - (u64)p_pkt->rx_buf_addr, pad, packet_length, - le16_to_cpu(p_cqe->parse_flags.flags), vlan, - opaque_data_0, opaque_data_1); + (u64)data->rx_buf_addr, + data->u.placement_offset, + data->length.packet_length, + data->parse_flags, + data->vlan, data->opaque_data_0, data->opaque_data_1); if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, - buffer->data, packet_length, false); + buffer->data, data->length.packet_length, false); } /* Determine if data is valid */ - if (packet_length < ETH_HLEN) + if (data->length.packet_length < ETH_HLEN) reuse = true; /* Allocate a replacement for buffer; Reuse upon failure */ @@ -219,9 +211,9 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, goto out_post; } - pad += NET_SKB_PAD; - skb_reserve(skb, pad); - skb_put(skb, packet_length); + data->u.placement_offset += NET_SKB_PAD; + skb_reserve(skb, data->u.placement_offset); + skb_put(skb, data->length.packet_length); skb_checksum_none_assert(skb); /* Get parital ethernet information instead of eth_type_trans(), @@ -232,10 +224,12 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, /* Pass SKB onward */ if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { - if (vlan) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); + if (data->vlan) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + data->vlan); cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, - opaque_data_0, opaque_data_1); + data->opaque_data_0, + data->opaque_data_1); } /* Update Buffer information and update FW producer */ @@ -472,33 +466,55 @@ 
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn, return 0; } -static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_conn, - union core_rx_cqe_union *p_cqe, - unsigned long *p_lock_flags, - bool b_last_cqe) +static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, + union core_rx_cqe_union *p_cqe, + struct qed_ll2_comp_rx_data *data) +{ + data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags); + data->length.packet_length = + le16_to_cpu(p_cqe->rx_cqe_fp.packet_length); + data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan); + data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]); + data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]); + data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset; +} + +static int +qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn, + union core_rx_cqe_union *p_cqe, + unsigned long *p_lock_flags, bool b_last_cqe) { struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct qed_ll2_rx_packet *p_pkt = NULL; + struct qed_ll2_comp_rx_data data; if (!list_empty(&p_rx->active_descq)) p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (!p_pkt) { DP_NOTICE(p_hwfn, - "LL2 Rx completion but active_descq is empty\n"); + "[%d] LL2 Rx completion but active_descq is empty\n", + p_ll2_conn->conn.conn_type); + return -EIO; } list_del(&p_pkt->list_entry); + qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) DP_NOTICE(p_hwfn, "Mismatch between active_descq and the LL2 Rx chain\n"); + list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + data.connection_handle = p_ll2_conn->my_id; + data.cookie = p_pkt->cookie; + data.rx_buf_addr = p_pkt->rx_buf_addr; + data.b_last_packet = b_last_cqe; + spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); - qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id, - p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe); + qed_ll2b_complete_rx_packet(p_hwfn, &data); spin_lock_irqsave(&p_rx->lock, *p_lock_flags); return 0; @@ -538,9 +554,9 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) cqe, flags, b_last_cqe); break; case CORE_RX_CQE_TYPE_REGULAR: - rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn, - cqe, &flags, - b_last_cqe); + rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn, + cqe, &flags, + b_last_cqe); break; default: rc = -EIO; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 78d9ed6c6b6f..056ac007dd12 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -79,6 +79,32 @@ struct qed_ll2_stats { u64 sent_bcast_pkts; }; +struct qed_ll2_comp_rx_data { + void *cookie; + dma_addr_t rx_buf_addr; + u16 parse_flags; + u16 vlan; + bool b_last_packet; + u8 connection_handle; + + union { + u16 packet_length; + u16 data_length; + } length; + + u32 opaque_data_0; + u32 opaque_data_1; + + /* GSI only */ + u32 gid_dst[4]; + u16 qp_id; + + union { + u8 placement_offset; + u8 data_length_error; + } u; +}; + struct qed_ll2_tx_pkt_info { void *cookie; dma_addr_t first_frag; -- cgit v1.2.3 From 13c547717231aad7e1635004ae3f698e5e78d6d1 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Fri, 9 Jun 2017 17:13:20 +0300 Subject: qed: Cleaner separation of LL2 inputs An LL2 connection [qed_ll2_info] has a sub-structure of type qed_ll2_conn that contains various inputs for ll2 acquisition, but the connection also utilizes a couple of other inputs; a caller-side sketch of the consolidated interface follows below.
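For illustration only, a rough sketch of how a caller might drive the consolidated acquisition interface, assuming the qed_ll2_acquire_data layout introduced in the hunks that follow (an 'input' sub-structure plus a p_connection_handle pointer); the wrapper function name, connection type, MTU and ring sizes here are placeholders rather than values from any in-tree caller:

	/* Hypothetical wrapper, illustrating the single-structure inputs */
	static int example_ll2_acquire(struct qed_hwfn *p_hwfn, u8 *p_handle)
	{
		struct qed_ll2_acquire_data data;

		/* Every acquisition input now travels inside one structure */
		memset(&data, 0, sizeof(data));
		data.input.conn_type = QED_LL2_TYPE_ISCSI;	/* placeholder */
		data.input.mtu = 1500;				/* placeholder */
		data.input.rx_num_desc = 32;			/* Rx ring size */
		data.input.tx_num_desc = 32;			/* Tx ring size */
		data.input.tx_dest = QED_LL2_TX_DEST_NW;	/* to network */

		/* On success, the connection handle is written through here */
		data.p_connection_handle = p_handle;

		return qed_ll2_acquire_connection(p_hwfn, &data);
	}
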
Restructure the input structure to include all the inputs and refactor the code necessary to populate those. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 379 +++++++++++++++-------------- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 42 +--- drivers/net/ethernet/qlogic/qed/qed_roce.c | 31 +-- include/linux/qed/qed_ll2_if.h | 39 +++ 4 files changed, 262 insertions(+), 229 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 46b71a668892..fcf4ea98e0bf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -315,7 +315,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) { struct qed_ooo_buffer *p_buffer; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; @@ -327,7 +327,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->conn.gsi_enable) + if (p_ll2_conn->input.gsi_enable) qed_ll2b_release_tx_gsi_packet(p_hwfn, p_ll2_conn-> my_id, @@ -396,7 +396,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) spin_unlock_irqrestore(&p_tx->lock, flags); tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->conn.gsi_enable) + if (p_ll2_conn->input.gsi_enable) qed_ll2b_complete_tx_gsi_packet(p_hwfn, p_ll2_conn->my_id, p_pkt->cookie, @@ -495,7 +495,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, if (!p_pkt) { DP_NOTICE(p_hwfn, "[%d] LL2 Rx completion but active_descq is empty\n", - p_ll2_conn->conn.conn_type); + p_ll2_conn->input.conn_type); return -EIO; } @@ -522,7 +522,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) { - struct qed_ll2_info *p_ll2_conn = cookie; + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; union core_rx_cqe_union *cqe = NULL; u16 cq_new_idx = 0, cq_old_idx = 0; @@ -536,7 +536,9 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) while (cq_new_idx != cq_old_idx) { bool b_last_cqe = (cq_new_idx == cq_old_idx); - cqe = qed_chain_consume(&p_rx->rcq_chain); + cqe = + (union core_rx_cqe_union *) + qed_chain_consume(&p_rx->rcq_chain); cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); DP_VERBOSE(p_hwfn, @@ -591,7 +593,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) { struct qed_ooo_buffer *p_buffer; p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; @@ -606,7 +608,6 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) } } -#if IS_ENABLED(CONFIG_QED_ISCSI) static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags) { u8 bd_flags = 0; @@ -782,7 +783,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, tx_pkt.vlan = p_buffer->vlan; tx_pkt.bd_flags = bd_flags; tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; - 
tx_pkt.tx_dest = p_ll2_conn->conn.tx_dest; + tx_pkt.tx_dest = p_ll2_conn->tx_dest; tx_pkt.first_frag = first_frag; tx_pkt.first_frag_len = p_buffer->packet_length; tx_pkt.cookie = p_buffer; @@ -895,60 +896,11 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) return 0; } -static int -qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, - u16 rx_num_ooo_buffers, u16 mtu) -{ - struct qed_ooo_buffer *p_buf = NULL; - void *p_virt; - u16 buf_idx; - int rc = 0; - - if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) - return rc; - - if (!rx_num_ooo_buffers) - return -EINVAL; - - for (buf_idx = 0; buf_idx < rx_num_ooo_buffers; buf_idx++) { - p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); - if (!p_buf) { - rc = -ENOMEM; - goto out; - } - - p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; - p_buf->rx_buffer_size = (p_buf->rx_buffer_size + - ETH_CACHE_LINE_SIZE - 1) & - ~(ETH_CACHE_LINE_SIZE - 1); - p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - p_buf->rx_buffer_size, - &p_buf->rx_buffer_phys_addr, - GFP_KERNEL); - if (!p_virt) { - kfree(p_buf); - rc = -ENOMEM; - goto out; - } - - p_buf->rx_buffer_virt_addr = p_virt; - qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); - } - - DP_VERBOSE(p_hwfn, QED_MSG_LL2, - "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", - rx_num_ooo_buffers, p_buf->rx_buffer_size); - -out: - return rc; -} - static void qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { - if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) return; qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); @@ -960,7 +912,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, { struct qed_ooo_buffer *p_buffer; - if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) return; qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); @@ -987,69 +939,11 @@ static void qed_ll2_stop_ooo(struct qed_dev *cdev) *handle = QED_LL2_UNUSED_HANDLE; } -static int qed_ll2_start_ooo(struct qed_dev *cdev, - struct qed_ll2_params *params) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; - struct qed_ll2_conn ll2_info = { 0 }; - int rc; - - ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO; - ll2_info.mtu = params->mtu; - ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; - ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; - ll2_info.tx_tc = OOO_LB_TC; - ll2_info.tx_dest = CORE_TX_DEST_LB; - - rc = qed_ll2_acquire_connection(hwfn, &ll2_info, - QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, - handle); - if (rc) { - DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); - goto out; - } - - rc = qed_ll2_establish_connection(hwfn, *handle); - if (rc) { - DP_INFO(cdev, "Failed to establist LL2 OOO connection\n"); - goto fail; - } - - return 0; - -fail: - qed_ll2_release_connection(hwfn, *handle); -out: - *handle = QED_LL2_UNUSED_HANDLE; - return rc; -} -#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ -static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, - void *p_cookie) { return -EINVAL; } -static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, - void *p_cookie) { return -EINVAL; } -static inline int -qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, - u16 rx_num_ooo_buffers, u16 mtu) { return 0; } -static inline void 
-qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_conn) { return; } -static inline void -qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_conn) { return; } -static inline void qed_ll2_stop_ooo(struct qed_dev *cdev) { return; } -static inline int qed_ll2_start_ooo(struct qed_dev *cdev, - struct qed_ll2_params *params) - { return -EINVAL; } -#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ - static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, u8 action_on_error) { - enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; + enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; struct core_rx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -1075,16 +969,15 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->sb_index = p_rx->rx_sb_index; p_ramrod->complete_event_flg = 1; - p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); - DMA_REGPAIR_LE(p_ramrod->bd_base, - p_rx->rxq_chain.p_phys_addr); + p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); + DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr); cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, qed_chain_get_pbl_phys(&p_rx->rcq_chain)); - p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg; - p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en; + p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; + p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 
0 : 1; @@ -1099,14 +992,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, } p_ramrod->action_on_error.error_type = action_on_error; - p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; + p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; return qed_spq_post(p_hwfn, p_ent, NULL); } static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { - enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type; + enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct core_tx_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -1117,7 +1010,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0; - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) p_ll2_conn->tx_stats_en = 0; else p_ll2_conn->tx_stats_en = 1; @@ -1138,7 +1031,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); p_ramrod->sb_index = p_tx->tx_sb_index; - p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu); + p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); p_ramrod->stats_en = p_ll2_conn->tx_stats_en; p_ramrod->stats_id = p_ll2_conn->tx_stats_id; @@ -1147,7 +1040,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain); p_ramrod->pbl_size = cpu_to_le16(pbl_size); - switch (p_ll2_conn->conn.tx_tc) { + switch (p_ll2_conn->input.tx_tc) { case LB_TC: pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); break; @@ -1177,7 +1070,8 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); } - p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable; + p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; + return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -1233,20 +1127,20 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, static int qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, u16 rx_num_desc) + struct qed_ll2_info *p_ll2_info) { struct qed_ll2_rx_packet *p_descq; u32 capacity; int rc = 0; - if (!rx_num_desc) + if (!p_ll2_info->input.rx_num_desc) goto out; rc = qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_NEXT_PTR, QED_CHAIN_CNT_TYPE_U16, - rx_num_desc, + p_ll2_info->input.rx_num_desc, sizeof(struct core_rx_bd), &p_ll2_info->rx_queue.rxq_chain); if (rc) { @@ -1268,7 +1162,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, - rx_num_desc, + p_ll2_info->input.rx_num_desc, sizeof(struct core_rx_fast_path_cqe), &p_ll2_info->rx_queue.rcq_chain); if (rc) { @@ -1278,28 +1172,27 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", - p_ll2_info->conn.conn_type, rx_num_desc); + p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc); out: return rc; } static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, - u16 tx_num_desc) + struct qed_ll2_info *p_ll2_info) { struct qed_ll2_tx_packet *p_descq; u32 capacity; int rc = 0; - if (!tx_num_desc) + if (!p_ll2_info->input.tx_num_desc) goto out; rc = qed_chain_alloc(p_hwfn->cdev, 
QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, - tx_num_desc, + p_ll2_info->input.tx_num_desc, sizeof(struct core_tx_bd), &p_ll2_info->tx_queue.txq_chain); if (rc) @@ -1316,28 +1209,95 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", - p_ll2_info->conn.conn_type, tx_num_desc); + p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc); out: if (rc) DP_NOTICE(p_hwfn, "Can't allocate memory for Tx LL2 with 0x%08x buffers\n", - tx_num_desc); + p_ll2_info->input.tx_num_desc); + return rc; +} + +static int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, u16 mtu) +{ + struct qed_ooo_buffer *p_buf = NULL; + void *p_virt; + u16 buf_idx; + int rc = 0; + + if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) + return rc; + + /* Correct number of requested OOO buffers if needed */ + if (!p_ll2_info->input.rx_num_ooo_buffers) { + u16 num_desc = p_ll2_info->input.rx_num_desc; + + if (!num_desc) + return -EINVAL; + p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2; + } + + for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers; + buf_idx++) { + p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); + if (!p_buf) { + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; + p_buf->rx_buffer_size = (p_buf->rx_buffer_size + + ETH_CACHE_LINE_SIZE - 1) & + ~(ETH_CACHE_LINE_SIZE - 1); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_buf->rx_buffer_size, + &p_buf->rx_buffer_phys_addr, + GFP_KERNEL); + if (!p_virt) { + kfree(p_buf); + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_virt_addr = p_virt; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); + } + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", + p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size); + +out: return rc; } +static enum core_error_handle +qed_ll2_get_error_choice(enum qed_ll2_error_handle err) +{ + switch (err) { + case QED_LL2_DROP_PACKET: + return LL2_DROP_PACKET; + case QED_LL2_DO_NOTHING: + return LL2_DO_NOTHING; + case QED_LL2_ASSERT: + return LL2_ASSERT; + default: + return LL2_DO_NOTHING; + } +} + int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, - struct qed_ll2_conn *p_params, - u16 rx_num_desc, - u16 tx_num_desc, - u8 *p_connection_handle) + struct qed_ll2_acquire_data *data) { qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; struct qed_ll2_info *p_ll2_info = NULL; + u8 i, *p_tx_max; int rc; - u8 i; - if (!p_connection_handle || !p_hwfn->p_ll2_info) + if (!data->p_connection_handle || !p_hwfn->p_ll2_info) return -EINVAL; /* Find a free connection to be used */ @@ -1356,23 +1316,33 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, if (!p_ll2_info) return -EBUSY; - p_ll2_info->conn = *p_params; + memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); - rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); + p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ? 
+ CORE_TX_DEST_NW : CORE_TX_DEST_LB; + + /* Correct maximum number of Tx BDs */ + p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet; + if (*p_tx_max == 0) + *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET; + else + *p_tx_max = min_t(u8, *p_tx_max, + CORE_LL2_TX_MAX_BDS_PER_PACKET); + rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info); if (rc) goto q_allocate_fail; - rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc); + rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info); if (rc) goto q_allocate_fail; rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, - rx_num_desc * 2, p_params->mtu); + data->input.mtu); if (rc) goto q_allocate_fail; /* Register callbacks for the Rx/Tx queues */ - if (p_params->conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) { comp_rx_cb = qed_ll2_lb_rxq_completion; comp_tx_cb = qed_ll2_lb_txq_completion; } else { @@ -1380,7 +1350,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, comp_tx_cb = qed_ll2_txq_completion; } - if (rx_num_desc) { + if (data->input.rx_num_desc) { qed_int_register_cb(p_hwfn, comp_rx_cb, &p_hwfn->p_ll2_info[i], &p_ll2_info->rx_queue.rx_sb_index, @@ -1388,7 +1358,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, p_ll2_info->rx_queue.b_cb_registred = true; } - if (tx_num_desc) { + if (data->input.tx_num_desc) { qed_int_register_cb(p_hwfn, comp_tx_cb, &p_hwfn->p_ll2_info[i], @@ -1397,7 +1367,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, p_ll2_info->tx_queue.b_cb_registred = true; } - *p_connection_handle = i; + *data->p_connection_handle = i; return rc; q_allocate_fail: @@ -1408,18 +1378,21 @@ q_allocate_fail: static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { + enum qed_ll2_error_handle error_input; + enum core_error_handle error_mode; u8 action_on_error = 0; if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) return 0; DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0); - + error_input = p_ll2_conn->input.ai_err_packet_too_big; + error_mode = qed_ll2_get_error_choice(error_input); SET_FIELD(action_on_error, - CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, - p_ll2_conn->conn.ai_err_packet_too_big); - SET_FIELD(action_on_error, - CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf); + CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode); + error_input = p_ll2_conn->input.ai_err_no_buf; + error_mode = qed_ll2_get_error_choice(error_input); + SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode); return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); } @@ -1503,7 +1476,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { qed_llh_add_protocol_filter(p_hwfn, p_ptt, 0x8906, 0, QED_LLH_FILTER_ETHERTYPE); @@ -1667,7 +1640,7 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", p_ll2->queue_id, p_ll2->cid, - p_ll2->conn.conn_type, + p_ll2->input.conn_type, prod_idx, pkt->first_frag_len, pkt->num_of_bds, @@ -1742,7 +1715,8 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", p_ll2_conn->queue_id, - p_ll2_conn->cid, 
p_ll2_conn->conn.conn_type, db_msg.spq_prod); + p_ll2_conn->cid, + p_ll2_conn->input.conn_type, db_msg.spq_prod); } int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, @@ -1866,10 +1840,10 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) qed_ll2_rxq_flush(p_hwfn, connection_handle); } - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); - if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_FCOE) { + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { qed_llh_remove_protocol_filter(p_hwfn, p_ptt, 0x8906, 0, QED_LLH_FILTER_ETHERTYPE); @@ -2054,11 +2028,68 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev, cdev->ll2->cb_cookie = cookie; } +static void qed_ll2_set_conn_data(struct qed_dev *cdev, + struct qed_ll2_acquire_data *data, + struct qed_ll2_params *params, + enum qed_ll2_conn_type conn_type, + u8 *handle, bool lb, u8 gsi_enable) +{ + memset(data, 0, sizeof(*data)); + + data->input.conn_type = conn_type; + data->input.mtu = params->mtu; + data->input.rx_num_desc = QED_LL2_RX_SIZE; + data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets; + data->input.rx_vlan_removal_en = params->rx_vlan_stripping; + data->input.tx_num_desc = QED_LL2_TX_SIZE; + data->input.gsi_enable = gsi_enable; + data->p_connection_handle = handle; + if (lb) { + data->input.tx_tc = OOO_LB_TC; + data->input.tx_dest = QED_LL2_TX_DEST_LB; + } else { + data->input.tx_tc = 0; + data->input.tx_dest = QED_LL2_TX_DEST_NW; + } +} + +static int qed_ll2_start_ooo(struct qed_dev *cdev, + struct qed_ll2_params *params) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + struct qed_ll2_acquire_data data; + int rc; + + qed_ll2_set_conn_data(cdev, &data, params, + QED_LL2_TYPE_ISCSI_OOO, handle, true, 0); + + rc = qed_ll2_acquire_connection(hwfn, &data); + if (rc) { + DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n"); + goto out; + } + + rc = qed_ll2_establish_connection(hwfn, *handle); + if (rc) { + DP_INFO(cdev, "Failed to establist LL2 OOO connection\n"); + goto fail; + } + + return 0; + +fail: + qed_ll2_release_connection(hwfn, *handle); +out: + *handle = QED_LL2_UNUSED_HANDLE; + return rc; +} + static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) { - struct qed_ll2_conn ll2_info; struct qed_ll2_buffer *buffer, *tmp_buffer; enum qed_ll2_conn_type conn_type; + struct qed_ll2_acquire_data data; struct qed_ptt *p_ptt; int rc, i; u8 gsi_enable = 1; @@ -2106,20 +2137,10 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) conn_type = QED_LL2_TYPE_TEST; } - /* Prepare the temporary ll2 information */ - memset(&ll2_info, 0, sizeof(ll2_info)); - - ll2_info.conn_type = conn_type; - ll2_info.mtu = params->mtu; - ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; - ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; - ll2_info.tx_tc = 0; - ll2_info.tx_dest = CORE_TX_DEST_NW; - ll2_info.gsi_enable = gsi_enable; + qed_ll2_set_conn_data(cdev, &data, params, conn_type, + &cdev->ll2->handle, false, gsi_enable); - rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info, - QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, - &cdev->ll2->handle); + rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data); if (rc) { DP_INFO(cdev, "Failed to acquire LL2 connection\n"); goto fail; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h 
b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 96af50b733e8..3caaad53c958 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -47,17 +47,6 @@ #define QED_MAX_NUM_OF_LL2_CONNECTIONS (4) -enum qed_ll2_conn_type { - QED_LL2_TYPE_FCOE, - QED_LL2_TYPE_ISCSI, - QED_LL2_TYPE_TEST, - QED_LL2_TYPE_ISCSI_OOO, - QED_LL2_TYPE_RESERVED2, - QED_LL2_TYPE_ROCE, - QED_LL2_TYPE_RESERVED3, - MAX_QED_LL2_RX_CONN_TYPE -}; - struct qed_ll2_rx_packet { struct list_head list_entry; struct core_rx_bd_with_buff_len *rxq_bd; @@ -123,27 +112,17 @@ struct qed_ll2_tx_queue { bool b_completing_packet; }; -struct qed_ll2_conn { - enum qed_ll2_conn_type conn_type; - u16 mtu; - u8 rx_drop_ttl0_flg; - u8 rx_vlan_removal_en; - u8 tx_tc; - enum core_tx_dest tx_dest; - enum core_error_handle ai_err_packet_too_big; - enum core_error_handle ai_err_no_buf; - u8 gsi_enable; -}; - struct qed_ll2_info { /* Lock protecting the state of LL2 */ struct mutex mutex; - struct qed_ll2_conn conn; + + struct qed_ll2_acquire_data_inputs input; u32 cid; u8 my_id; u8 queue_id; u8 tx_stats_id; bool b_active; + enum core_tx_dest tx_dest; u8 tx_stats_en; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; @@ -154,20 +133,13 @@ struct qed_ll2_info { * starts rx & tx (if relevant) queues pair. Provides * connecion handler as output parameter. * - * @param p_hwfn - * @param p_params Contain various configuration properties - * @param rx_num_desc - * @param tx_num_desc - * - * @param p_connection_handle Output container for LL2 connection's handle * - * @return 0 on success, failure otherwise + * @param p_hwfn + * @param data - describes connection parameters + * @return int */ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, - struct qed_ll2_conn *p_params, - u16 rx_num_desc, - u16 tx_num_desc, - u8 *p_connection_handle); + struct qed_ll2_acquire_data *data); /** * @brief qed_ll2_establish_connection - start previously diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index afc63c0e7c44..33abcf110260 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -2818,7 +2818,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev, { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); struct qed_roce_ll2_info *roce_ll2; - struct qed_ll2_conn ll2_params; + struct qed_ll2_acquire_data data; int rc; if (!params) { @@ -2844,25 +2844,26 @@ static int qed_roce_ll2_start(struct qed_dev *cdev, DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n"); return -ENOMEM; } + roce_ll2->handle = QED_LL2_UNUSED_HANDLE; roce_ll2->cbs = params->cbs; roce_ll2->cb_cookie = params->cb_cookie; mutex_init(&roce_ll2->lock); - memset(&ll2_params, 0, sizeof(ll2_params)); - ll2_params.conn_type = QED_LL2_TYPE_ROCE; - ll2_params.mtu = params->mtu; - ll2_params.rx_drop_ttl0_flg = true; - ll2_params.rx_vlan_removal_en = false; - ll2_params.tx_dest = CORE_TX_DEST_NW; - ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET; - ll2_params.ai_err_no_buf = LL2_DROP_PACKET; - ll2_params.gsi_enable = true; - - rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params, - params->max_rx_buffers, - params->max_tx_buffers, - &roce_ll2->handle); + memset(&data, 0, sizeof(data)); + data.input.conn_type = QED_LL2_TYPE_ROCE; + data.input.mtu = params->mtu; + data.input.rx_num_desc = params->max_rx_buffers; + data.input.tx_num_desc = params->max_tx_buffers; + data.input.rx_drop_ttl0_flg = true; + 
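The conversion here shows the pattern every LL2 consumer now follows: queue sizing, traffic class and error-handling policy all travel inside a single qed_ll2_acquire_data_inputs structure, and the connection handle is returned through data.p_connection_handle rather than a separate output argument. A minimal sketch of an acquire under the reworked API (qed-internal code, so struct qed_hwfn is visible; example_ll2_acquire() and the descriptor counts are illustrative, while the field and enum names are the ones this patch adds to qed_ll2_if.h):

#include <linux/string.h>
#include "qed_ll2.h"	/* qed-internal; declares qed_ll2_acquire_connection() */

static int example_ll2_acquire(struct qed_hwfn *p_hwfn, u16 mtu, u8 *p_handle)
{
	struct qed_ll2_acquire_data data;

	memset(&data, 0, sizeof(data));
	data.input.conn_type = QED_LL2_TYPE_TEST;
	data.input.mtu = mtu;
	data.input.rx_num_desc = 32;	/* illustrative sizes */
	data.input.tx_num_desc = 32;
	/* tx_max_bds_per_packet left at 0; qed corrects it to the CORE maximum */
	data.input.tx_dest = QED_LL2_TX_DEST_NW;
	data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET;
	data.input.ai_err_no_buf = QED_LL2_DROP_PACKET;
	data.p_connection_handle = p_handle;	/* written on success */

	return qed_ll2_acquire_connection(p_hwfn, &data);
}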
data.input.rx_vlan_removal_en = false; + data.input.tx_dest = QED_LL2_TX_DEST_NW; + data.input.ai_err_packet_too_big = LL2_DROP_PACKET; + data.input.ai_err_no_buf = LL2_DROP_PACKET; + data.p_connection_handle = &roce_ll2->handle; + data.input.gsi_enable = true; + + rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data); if (rc) { DP_ERR(cdev, "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n", diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 056ac007dd12..a1f63eca430a 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -43,6 +43,17 @@ #include #include +enum qed_ll2_conn_type { + QED_LL2_TYPE_FCOE, + QED_LL2_TYPE_ISCSI, + QED_LL2_TYPE_TEST, + QED_LL2_TYPE_ISCSI_OOO, + QED_LL2_TYPE_RESERVED2, + QED_LL2_TYPE_ROCE, + QED_LL2_TYPE_RESERVED3, + MAX_QED_LL2_RX_CONN_TYPE +}; + enum qed_ll2_roce_flavor_type { QED_LL2_ROCE, QED_LL2_RROCE, @@ -55,6 +66,12 @@ enum qed_ll2_tx_dest { QED_LL2_TX_DEST_MAX }; +enum qed_ll2_error_handle { + QED_LL2_DROP_PACKET, + QED_LL2_DO_NOTHING, + QED_LL2_ASSERT, +}; + struct qed_ll2_stats { u64 gsi_invalid_hdr; u64 gsi_invalid_pkt_length; @@ -105,6 +122,28 @@ struct qed_ll2_comp_rx_data { } u; }; +struct qed_ll2_acquire_data_inputs { + enum qed_ll2_conn_type conn_type; + u16 mtu; + u16 rx_num_desc; + u16 rx_num_ooo_buffers; + u8 rx_drop_ttl0_flg; + u8 rx_vlan_removal_en; + u16 tx_num_desc; + u8 tx_max_bds_per_packet; + u8 tx_tc; + enum qed_ll2_tx_dest tx_dest; + enum qed_ll2_error_handle ai_err_packet_too_big; + enum qed_ll2_error_handle ai_err_no_buf; + u8 gsi_enable; +}; + +struct qed_ll2_acquire_data { + struct qed_ll2_acquire_data_inputs input; + /* Output container for LL2 connection's handle */ + u8 *p_connection_handle; +}; + struct qed_ll2_tx_pkt_info { void *cookie; dma_addr_t first_frag; -- cgit v1.2.3 From 0518c12f1f79dc2f2020836974c577404e42ae89 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Fri, 9 Jun 2017 17:13:22 +0300 Subject: qed*: LL2 callback operations LL2 today is interrupt driven - when tx/rx completion arrives [or any other indication], qed needs to operate on the connection and pass the information to the protocol-driver [or internal qed consumer]. Since we have several flavors of ll2 employed by the driver, each handler needs to do an if-else to determine the right functionality to use based on the connection type. In order to make things more scalable [given that we're going to add additional types of ll2 flavors] move the infrastructure into using a callback-based approach - the callbacks would be provided as part of the connection's initialization parameters. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: David S.
Miller --- drivers/infiniband/hw/qedr/main.c | 6 +- drivers/infiniband/hw/qedr/qedr.h | 2 + drivers/infiniband/hw/qedr/qedr_cm.c | 238 ++++++++++++++++------ drivers/net/ethernet/qlogic/qed/qed.h | 1 - drivers/net/ethernet/qlogic/qed/qed_ll2.c | 197 +++++++++---------- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 34 ++-- drivers/net/ethernet/qlogic/qed/qed_roce.c | 304 ++--------------------------- drivers/net/ethernet/qlogic/qed/qed_roce.h | 43 ---- include/linux/qed/qed_ll2_if.h | 36 ++++ include/linux/qed/qed_roce_if.h | 80 +++----- 10 files changed, 384 insertions(+), 557 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 6a72095d6c7a..485c1fef238b 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -886,9 +886,9 @@ static void qedr_mac_address_change(struct qedr_dev *dev) memcpy(&sgid->raw[8], guid, sizeof(guid)); /* Update LL2 */ - rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev, - dev->gsi_ll2_mac_address, - dev->ndev->dev_addr); + rc = dev->ops->ll2_set_mac_filter(dev->cdev, + dev->gsi_ll2_mac_address, + dev->ndev->dev_addr); ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index aa08c76a4245..80333ec2c8b6 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -150,6 +150,8 @@ struct qedr_dev { u32 dp_module; u8 dp_level; u8 num_hwfns; + u8 gsi_ll2_handle; + uint wq_multiplier; u8 gsi_ll2_mac_address[ETH_ALEN]; int gsi_qp_created; diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index d86dbe814d98..eb3dce72fc21 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -64,9 +64,14 @@ void qedr_store_gsi_qp_cq(struct qedr_dev *dev, struct qedr_qp *qp, dev->gsi_qp = qp; } -void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) +void qedr_ll2_complete_tx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet) { - struct qedr_dev *dev = (struct qedr_dev *)_qdev; + struct qedr_dev *dev = (struct qedr_dev *)cxt; + struct qed_roce_ll2_packet *pkt = cookie; struct qedr_cq *cq = dev->gsi_sqcq; struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; @@ -88,20 +93,26 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt) (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); } -void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, - struct qed_roce_ll2_rx_params *params) +void qedr_ll2_complete_rx_packet(void *cxt, + struct qed_ll2_comp_rx_data *data) { - struct qedr_dev *dev = (struct qedr_dev *)_dev; + struct qedr_dev *dev = (struct qedr_dev *)cxt; struct qedr_cq *cq = dev->gsi_rqcq; struct qedr_qp *qp = dev->gsi_qp; unsigned long flags; spin_lock_irqsave(&qp->q_lock, flags); - qp->rqe_wr_id[qp->rq.gsi_cons].rc = params->rc; - qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = params->vlan_id; - qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = pkt->payload[0].len; - ether_addr_copy(qp->rqe_wr_id[qp->rq.gsi_cons].smac, params->smac); + qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? + -EINVAL : 0; + qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; + /* note: length stands for data length i.e. 
GRH is excluded */ + qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = + data->length.data_length; + *((u32 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[0]) = + ntohl(data->opaque_data_0); + *((u16 *)&qp->rqe_wr_id[qp->rq.gsi_cons].smac[4]) = + ntohs((u16)data->opaque_data_1); qedr_inc_sw_gsi_cons(&qp->rq); @@ -111,6 +122,14 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); } +void qedr_ll2_release_rx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, bool b_last_packet) +{ + /* Do nothing... */ +} + static void qedr_destroy_gsi_cq(struct qedr_dev *dev, struct ib_qp_init_attr *attrs) { @@ -159,27 +178,159 @@ static inline int qedr_check_gsi_qp_attrs(struct qedr_dev *dev, return 0; } +static int qedr_ll2_post_tx(struct qedr_dev *dev, + struct qed_roce_ll2_packet *pkt) +{ + enum qed_ll2_roce_flavor_type roce_flavor; + struct qed_ll2_tx_pkt_info ll2_tx_pkt; + int rc; + int i; + + memset(&ll2_tx_pkt, 0, sizeof(ll2_tx_pkt)); + + roce_flavor = (pkt->roce_mode == ROCE_V1) ? + QED_LL2_ROCE : QED_LL2_RROCE; + + if (pkt->roce_mode == ROCE_V2_IPV4) + ll2_tx_pkt.enable_ip_cksum = 1; + + ll2_tx_pkt.num_of_bds = 1 /* hdr */ + pkt->n_seg; + ll2_tx_pkt.vlan = 0; + ll2_tx_pkt.tx_dest = pkt->tx_dest; + ll2_tx_pkt.qed_roce_flavor = roce_flavor; + ll2_tx_pkt.first_frag = pkt->header.baddr; + ll2_tx_pkt.first_frag_len = pkt->header.len; + ll2_tx_pkt.cookie = pkt; + + /* tx header */ + rc = dev->ops->ll2_prepare_tx_packet(dev->rdma_ctx, + dev->gsi_ll2_handle, + &ll2_tx_pkt, 1); + if (rc) { + /* TX failed while posting header - release resources */ + dma_free_coherent(&dev->pdev->dev, pkt->header.len, + pkt->header.vaddr, pkt->header.baddr); + kfree(pkt); + + DP_ERR(dev, "roce ll2 tx: header failed (rc=%d)\n", rc); + return rc; + } + + /* tx payload */ + for (i = 0; i < pkt->n_seg; i++) { + rc = dev->ops->ll2_set_fragment_of_tx_packet( + dev->rdma_ctx, + dev->gsi_ll2_handle, + pkt->payload[i].baddr, + pkt->payload[i].len); + + if (rc) { + /* if failed not much to do here, partial packet has + * been posted we can't free memory, will need to wait + * for completion + */ + DP_ERR(dev, "ll2 tx: payload failed (rc=%d)\n", rc); + return rc; + } + } + + return 0; +} + +int qedr_ll2_stop(struct qedr_dev *dev) +{ + int rc; + + if (dev->gsi_ll2_handle == QED_LL2_UNUSED_HANDLE) + return 0; + + /* remove LL2 MAC address filter */ + rc = dev->ops->ll2_set_mac_filter(dev->cdev, + dev->gsi_ll2_mac_address, NULL); + + rc = dev->ops->ll2_terminate_connection(dev->rdma_ctx, + dev->gsi_ll2_handle); + if (rc) + DP_ERR(dev, "Failed to terminate LL2 connection (rc=%d)\n", rc); + + dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle); + + dev->gsi_ll2_handle = QED_LL2_UNUSED_HANDLE; + + return rc; +} + +int qedr_ll2_start(struct qedr_dev *dev, + struct ib_qp_init_attr *attrs, struct qedr_qp *qp) +{ + struct qed_ll2_acquire_data data; + struct qed_ll2_cbs cbs; + int rc; + + /* configure and start LL2 */ + cbs.rx_comp_cb = qedr_ll2_complete_rx_packet; + cbs.tx_comp_cb = qedr_ll2_complete_tx_packet; + cbs.rx_release_cb = qedr_ll2_release_rx_packet; + cbs.tx_release_cb = qedr_ll2_complete_tx_packet; + cbs.cookie = dev; + + memset(&data, 0, sizeof(data)); + data.input.conn_type = QED_LL2_TYPE_ROCE; + data.input.mtu = dev->ndev->mtu; + data.input.rx_num_desc = attrs->cap.max_recv_wr; + data.input.rx_drop_ttl0_flg = true; + data.input.rx_vlan_removal_en = false; + data.input.tx_num_desc = attrs->cap.max_send_wr; + 
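Under the callback scheme a consumer supplies its completion handlers up front, and qed_ll2_set_cbs() fails the acquire with -EINVAL if any of the four callbacks or the cookie is missing. A hedged sketch of a minimal bundle (example_* names are illustrative; the typedefs are the ones this patch adds to qed_ll2_if.h). Note that the Tx complete and release typedefs share a signature, so a single handler can serve both roles, exactly as qedr does here:

#include <linux/qed/qed_ll2_if.h>

static void example_rx_comp(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	/* hand the completed buffer to the consumer; cxt is the cookie below */
}

static void example_rx_release(void *cxt, u8 connection_handle, void *cookie,
			       dma_addr_t rx_buf_addr, bool b_last_packet)
{
	/* free per-buffer resources on connection teardown */
}

static void example_tx_comp(void *cxt, u8 connection_handle, void *cookie,
			    dma_addr_t first_frag_addr, bool b_last_fragment,
			    bool b_last_packet)
{
	/* reclaim the per-packet cookie given to ll2_prepare_tx_packet() */
}

static struct qed_ll2_cbs example_cbs = {
	.rx_comp_cb = example_rx_comp,
	.rx_release_cb = example_rx_release,
	.tx_comp_cb = example_tx_comp,
	.tx_release_cb = example_tx_comp,	/* same signature, one handler */
	/* .cookie must point at the consumer's context before the acquire */
};

The consumer then fills example_cbs.cookie and points data.cbs at the table before calling ll2_acquire_connection().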
data.input.tx_tc = 0; + data.input.tx_dest = QED_LL2_TX_DEST_NW; + data.input.ai_err_packet_too_big = QED_LL2_DROP_PACKET; + data.input.ai_err_no_buf = QED_LL2_DROP_PACKET; + data.input.gsi_enable = 1; + data.p_connection_handle = &dev->gsi_ll2_handle; + data.cbs = &cbs; + + rc = dev->ops->ll2_acquire_connection(dev->rdma_ctx, &data); + if (rc) { + DP_ERR(dev, + "ll2 start: failed to acquire LL2 connection (rc=%d)\n", + rc); + return rc; + } + + rc = dev->ops->ll2_establish_connection(dev->rdma_ctx, + dev->gsi_ll2_handle); + if (rc) { + DP_ERR(dev, + "ll2 start: failed to establish LL2 connection (rc=%d)\n", + rc); + goto err1; + } + + rc = dev->ops->ll2_set_mac_filter(dev->cdev, NULL, dev->ndev->dev_addr); + if (rc) + goto err2; + + return 0; + +err2: + dev->ops->ll2_terminate_connection(dev->rdma_ctx, dev->gsi_ll2_handle); +err1: + dev->ops->ll2_release_connection(dev->rdma_ctx, dev->gsi_ll2_handle); + + return rc; +} + struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs, struct qedr_qp *qp) { - struct qed_roce_ll2_params ll2_params; int rc; rc = qedr_check_gsi_qp_attrs(dev, attrs); if (rc) return ERR_PTR(rc); - /* configure and start LL2 */ - memset(&ll2_params, 0, sizeof(ll2_params)); - ll2_params.max_tx_buffers = attrs->cap.max_send_wr; - ll2_params.max_rx_buffers = attrs->cap.max_recv_wr; - ll2_params.cbs.tx_cb = qedr_ll2_tx_cb; - ll2_params.cbs.rx_cb = qedr_ll2_rx_cb; - ll2_params.cb_cookie = (void *)dev; - ll2_params.mtu = dev->ndev->mtu; - ether_addr_copy(ll2_params.mac_address, dev->ndev->dev_addr); - rc = dev->ops->roce_ll2_start(dev->cdev, &ll2_params); + rc = qedr_ll2_start(dev, attrs, qp); if (rc) { DP_ERR(dev, "create gsi qp: failed on ll2 start. rc=%d\n", rc); return ERR_PTR(rc); @@ -214,7 +365,7 @@ struct ib_qp *qedr_create_gsi_qp(struct qedr_dev *dev, err: kfree(qp->rqe_wr_id); - rc = dev->ops->roce_ll2_stop(dev->cdev); + rc = qedr_ll2_stop(dev); if (rc) DP_ERR(dev, "create gsi qp: failed destroy on create\n"); @@ -223,15 +374,7 @@ err: int qedr_destroy_gsi_qp(struct qedr_dev *dev) { - int rc; - - rc = dev->ops->roce_ll2_stop(dev->cdev); - if (rc) - DP_ERR(dev, "destroy gsi qp: failed (rc=%d)\n", rc); - else - DP_DEBUG(dev, QEDR_MSG_GSI, "destroy gsi qp: success\n"); - - return rc; + return qedr_ll2_stop(dev); } #define QEDR_MAX_UD_HEADER_SIZE (100) @@ -421,7 +564,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, { struct qed_roce_ll2_packet *pkt = NULL; struct qedr_qp *qp = get_qedr_qp(ibqp); - struct qed_roce_ll2_tx_params params; struct qedr_dev *dev = qp->dev; unsigned long flags; int rc; @@ -449,8 +591,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto err; } - memset(¶ms, 0, sizeof(params)); - spin_lock_irqsave(&qp->q_lock, flags); rc = qedr_gsi_build_packet(dev, qp, wr, &pkt); @@ -459,7 +599,8 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto err; } - rc = dev->ops->roce_ll2_tx(dev->cdev, pkt, ¶ms); + rc = qedr_ll2_post_tx(dev, pkt); + if (!rc) { qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; qedr_inc_sw_prod(&qp->sq); @@ -467,17 +608,6 @@ int qedr_gsi_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, "gsi post send: opcode=%d, in_irq=%ld, irqs_disabled=%d, wr_id=%llx\n", wr->opcode, in_irq(), irqs_disabled(), wr->wr_id); } else { - if (rc == QED_ROCE_TX_HEAD_FAILURE) { - /* TX failed while posting header - release resources */ - dma_free_coherent(&dev->pdev->dev, pkt->header.len, - pkt->header.vaddr, pkt->header.baddr); - kfree(pkt); - } else if (rc == 
QED_ROCE_TX_FRAG_FAILURE) { - /* NTD since TX failed while posting a fragment. We will - * release the resources on TX callback - */ - } - DP_ERR(dev, "gsi post send: failed to transmit (rc=%d)\n", rc); rc = -EAGAIN; *bad_wr = wr; @@ -504,10 +634,8 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, { struct qedr_dev *dev = get_qedr_dev(ibqp->device); struct qedr_qp *qp = get_qedr_qp(ibqp); - struct qed_roce_ll2_buffer buf; unsigned long flags; - int status = 0; - int rc; + int rc = 0; if ((qp->state != QED_ROCE_QP_STATE_RTR) && (qp->state != QED_ROCE_QP_STATE_RTS)) { @@ -518,8 +646,6 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, return -EINVAL; } - memset(&buf, 0, sizeof(buf)); - spin_lock_irqsave(&qp->q_lock, flags); while (wr) { @@ -530,10 +656,12 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, goto err; } - buf.baddr = wr->sg_list[0].addr; - buf.len = wr->sg_list[0].length; - - rc = dev->ops->roce_ll2_post_rx_buffer(dev->cdev, &buf, 0, 1); + rc = dev->ops->ll2_post_rx_buffer(dev->rdma_ctx, + dev->gsi_ll2_handle, + wr->sg_list[0].addr, + wr->sg_list[0].length, + 0 /* cookie */, + 1 /* notify_fw */); if (rc) { DP_ERR(dev, "gsi post recv: failed to post rx buffer (rc=%d)\n", @@ -553,7 +681,7 @@ int qedr_gsi_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_unlock_irqrestore(&qp->q_lock, flags); - return status; + return rc; err: spin_unlock_irqrestore(&qp->q_lock, flags); *bad_wr = wr; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d7afc42f766c..14b08ee9e3ad 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -552,7 +552,6 @@ struct qed_hwfn { #endif struct z_stream_s *stream; - struct qed_roce_ll2_info *ll2; }; struct pci_params { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index f3aad61e6394..b222b2b471e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -89,13 +89,14 @@ struct qed_ll2_buffer { dma_addr_t phys_addr; }; -static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn, +static void qed_ll2b_complete_tx_packet(void *cxt, u8 connection_handle, void *cookie, dma_addr_t first_frag_addr, bool b_last_fragment, bool b_last_packet) { + struct qed_hwfn *p_hwfn = cxt; struct qed_dev *cdev = p_hwfn->cdev; struct sk_buff *skb = cookie; @@ -164,9 +165,9 @@ static void qed_ll2_kill_buffers(struct qed_dev *cdev) qed_ll2_dealloc_buffer(cdev, buffer); } -static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, - struct qed_ll2_comp_rx_data *data) +void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_buffer *buffer = data->cookie; struct qed_dev *cdev = p_hwfn->cdev; dma_addr_t new_phys_addr; @@ -327,21 +328,12 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->input.gsi_enable) - qed_ll2b_release_tx_gsi_packet(p_hwfn, - p_ll2_conn-> - my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); - else - qed_ll2b_complete_tx_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, - b_last_packet); + p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); } } } @@ -354,7 +346,6 @@ static 
int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) struct qed_ll2_tx_packet *p_pkt; bool b_last_frag = false; unsigned long flags; - dma_addr_t tx_frag; int rc = -EINVAL; spin_lock_irqsave(&p_tx->lock, flags); @@ -395,19 +386,13 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); spin_unlock_irqrestore(&p_tx->lock, flags); - tx_frag = p_pkt->bds_set[0].tx_frag; - if (p_ll2_conn->input.gsi_enable) - qed_ll2b_complete_tx_gsi_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, !num_bds); - else - qed_ll2b_complete_tx_packet(p_hwfn, - p_ll2_conn->my_id, - p_pkt->cookie, - tx_frag, - b_last_frag, !num_bds); + + p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + p_pkt->cookie, + p_pkt->bds_set[0].tx_frag, + b_last_frag, !num_bds); + spin_lock_irqsave(&p_tx->lock, flags); } @@ -418,52 +403,16 @@ out: return rc; } -static int -qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn, - struct qed_ll2_info *p_ll2_info, - union core_rx_cqe_union *p_cqe, - unsigned long lock_flags, bool b_last_cqe) +static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, + union core_rx_cqe_union *p_cqe, + struct qed_ll2_comp_rx_data *data) { - struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue; - struct qed_ll2_rx_packet *p_pkt = NULL; - u16 packet_length, parse_flags, vlan; - u32 src_mac_addrhi; - u16 src_mac_addrlo; - - if (!list_empty(&p_rx->active_descq)) - p_pkt = list_first_entry(&p_rx->active_descq, - struct qed_ll2_rx_packet, list_entry); - if (!p_pkt) { - DP_NOTICE(p_hwfn, - "GSI Rx completion but active_descq is empty\n"); - return -EIO; - } - - list_del(&p_pkt->list_entry); - parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags); - packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length); - vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan); - src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); - src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); - if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) - DP_NOTICE(p_hwfn, - "Mismatch between active_descq and the LL2 Rx chain\n"); - list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); - - spin_unlock_irqrestore(&p_rx->lock, lock_flags); - qed_ll2b_complete_rx_gsi_packet(p_hwfn, - p_ll2_info->my_id, - p_pkt->cookie, - p_pkt->rx_buf_addr, - packet_length, - p_cqe->rx_cqe_gsi.data_length_error, - parse_flags, - vlan, - src_mac_addrhi, - src_mac_addrlo, b_last_cqe); - spin_lock_irqsave(&p_rx->lock, lock_flags); - - return 0; + data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags); + data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length); + data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan); + data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); + data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); + data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; } static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, @@ -501,7 +450,10 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, } list_del(&p_pkt->list_entry); - qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); + if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR) + qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); + else + qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data); if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) DP_NOTICE(p_hwfn, "Mismatch between active_descq and the LL2 Rx chain\n"); @@ -514,7 +466,8 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn 
*p_hwfn, data.b_last_packet = b_last_cqe; spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); - qed_ll2b_complete_rx_packet(p_hwfn, &data); + p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data); + spin_lock_irqsave(&p_rx->lock, *p_lock_flags); return 0; @@ -552,9 +505,6 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) rc = -EINVAL; break; case CORE_RX_CQE_TYPE_GSI_OFFLOAD: - rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn, - cqe, flags, b_last_cqe); - break; case CORE_RX_CQE_TYPE_REGULAR: rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn, cqe, &flags, @@ -1244,6 +1194,23 @@ out: return rc; } +static int +qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs) +{ + if (!cbs || (!cbs->rx_comp_cb || + !cbs->rx_release_cb || + !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie)) + return -EINVAL; + + p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb; + p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; + p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; + p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; + p_ll2_info->cbs.cookie = cbs->cookie; + + return 0; +} + static enum core_error_handle qed_ll2_get_error_choice(enum qed_ll2_error_handle err) { @@ -1259,9 +1226,9 @@ qed_ll2_get_error_choice(enum qed_ll2_error_handle err) } } -int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, - struct qed_ll2_acquire_data *data) +int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) { + struct qed_hwfn *p_hwfn = cxt; qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; struct qed_ll2_info *p_ll2_info = NULL; u8 i, *p_tx_max; @@ -1298,6 +1265,13 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, else *p_tx_max = min_t(u8, *p_tx_max, CORE_LL2_TX_MAX_BDS_PER_PACKET); + + rc = qed_ll2_set_cbs(p_ll2_info, data->cbs); + if (rc) { + DP_NOTICE(p_hwfn, "Invalid callback functions\n"); + goto q_allocate_fail; + } + rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info); if (rc) goto q_allocate_fail; @@ -1377,8 +1351,10 @@ qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); } -int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) + +int qed_ll2_establish_connection(void *cxt, u8 connection_handle) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn; struct qed_ll2_rx_queue *p_rx; struct qed_ll2_tx_queue *p_tx; @@ -1505,11 +1481,12 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); } -int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, +int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, dma_addr_t addr, u16 buf_len, void *cookie, u8 notify_fw) { + struct qed_hwfn *p_hwfn = cxt; struct core_rx_bd_with_buff_len *p_curb = NULL; struct qed_ll2_rx_packet *p_curp = NULL; struct qed_ll2_info *p_ll2_conn; @@ -1699,11 +1676,12 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, p_ll2_conn->input.conn_type, db_msg.spq_prod); } -int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, +int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, struct qed_ll2_tx_pkt_info *pkt, bool notify_fw) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_tx_packet *p_curp = NULL; struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_tx_queue *p_tx; @@ -1750,11 +1728,12 @@ out: return rc; } -int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, +int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 
connection_handle, dma_addr_t addr, u16 nbytes) { struct qed_ll2_tx_packet *p_cur_send_packet = NULL; + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; u16 cur_send_frag_num = 0; struct core_tx_bd *p_bd; @@ -1789,8 +1768,9 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, return 0; } -int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) +int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; int rc = -EINVAL; struct qed_ptt *p_ptt; @@ -1855,8 +1835,10 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, kfree(p_buffer); } } -void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) + +void qed_ll2_release_connection(void *cxt, u8 connection_handle) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); @@ -1989,9 +1971,10 @@ static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn, p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts); } -int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, +int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats) { + struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ptt *p_ptt; @@ -2018,6 +2001,17 @@ int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, return 0; } +static void qed_ll2b_release_rx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, + bool b_last_packet) +{ + struct qed_hwfn *p_hwfn = cxt; + + qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie); +} + static void qed_ll2_register_cb_ops(struct qed_dev *cdev, const struct qed_ll2_cb_ops *ops, void *cookie) @@ -2026,11 +2020,18 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev, cdev->ll2->cb_cookie = cookie; } +struct qed_ll2_cbs ll2_cbs = { + .rx_comp_cb = &qed_ll2b_complete_rx_packet, + .rx_release_cb = &qed_ll2b_release_rx_packet, + .tx_comp_cb = &qed_ll2b_complete_tx_packet, + .tx_release_cb = &qed_ll2b_complete_tx_packet, +}; + static void qed_ll2_set_conn_data(struct qed_dev *cdev, struct qed_ll2_acquire_data *data, struct qed_ll2_params *params, enum qed_ll2_conn_type conn_type, - u8 *handle, bool lb, u8 gsi_enable) + u8 *handle, bool lb) { memset(data, 0, sizeof(*data)); @@ -2040,8 +2041,10 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev, data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets; data->input.rx_vlan_removal_en = params->rx_vlan_stripping; data->input.tx_num_desc = QED_LL2_TX_SIZE; - data->input.gsi_enable = gsi_enable; data->p_connection_handle = handle; + data->cbs = &ll2_cbs; + ll2_cbs.cookie = QED_LEADING_HWFN(cdev); + if (lb) { data->input.tx_tc = OOO_LB_TC; data->input.tx_dest = QED_LL2_TX_DEST_LB; @@ -2060,7 +2063,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev, int rc; qed_ll2_set_conn_data(cdev, &data, params, - QED_LL2_TYPE_ISCSI_OOO, handle, true, 0); + QED_LL2_TYPE_ISCSI_OOO, handle, true); rc = qed_ll2_acquire_connection(hwfn, &data); if (rc) { @@ -2090,7 +2093,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) struct qed_ll2_acquire_data data; struct qed_ptt *p_ptt; int rc, i; - u8 gsi_enable = 1; + /* Initialize LL2 locks & lists */ INIT_LIST_HEAD(&cdev->ll2->list); @@ -2122,11 +2125,9 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { case QED_PCI_FCOE: 
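Because every entry point now takes an opaque context, an upper driver can invoke LL2 directly through the exported ops table with whatever context qed handed out at registration time. A sketch of a qedr-style helper posting one receive buffer (example_post_rx_buffer() is illustrative; dev->rdma_ctx and dev->gsi_ll2_handle are the fields this series relies on):

#include "qedr.h"	/* qedr-internal; provides struct qedr_dev */

static int example_post_rx_buffer(struct qedr_dev *dev,
				  dma_addr_t addr, u16 len)
{
	return dev->ops->ll2_post_rx_buffer(dev->rdma_ctx,
					    dev->gsi_ll2_handle,
					    addr, len,
					    NULL /* cookie */,
					    1 /* notify_fw */);
}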
conn_type = QED_LL2_TYPE_FCOE; - gsi_enable = 0; break; case QED_PCI_ISCSI: conn_type = QED_LL2_TYPE_ISCSI; - gsi_enable = 0; break; case QED_PCI_ETH_ROCE: conn_type = QED_LL2_TYPE_ROCE; @@ -2136,7 +2137,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) } qed_ll2_set_conn_data(cdev, &data, params, conn_type, - &cdev->ll2->handle, false, gsi_enable); + &cdev->ll2->handle, false); rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data); if (rc) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 3caaad53c958..a822528e9c63 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -126,6 +126,7 @@ struct qed_ll2_info { u8 tx_stats_en; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; + struct qed_ll2_cbs cbs; }; /** @@ -134,30 +135,29 @@ struct qed_ll2_info { * connecion handler as output parameter. * * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param data - describes connection parameters * @return int */ -int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, - struct qed_ll2_acquire_data *data); +int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data); /** * @brief qed_ll2_establish_connection - start previously * allocated LL2 queues pair * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param p_ptt * @param connection_handle LL2 connection's handle obtained from * qed_ll2_require_connection * * @return 0 on success, failure otherwise */ -int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); +int qed_ll2_establish_connection(void *cxt, u8 connection_handle); /** * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue. * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle obtained from * qed_ll2_require_connection * @param addr rx (physical address) buffers to submit @@ -166,7 +166,7 @@ int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); * * @return 0 on success, failure otherwise */ -int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, +int qed_ll2_post_rx_buffer(void *cxt, u8 connection_handle, dma_addr_t addr, u16 buf_len, void *cookie, u8 notify_fw); @@ -175,14 +175,14 @@ int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, * @brief qed_ll2_prepare_tx_packet - request for start Tx BD * to prepare Tx packet submission to FW. 
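For multi-fragment packets the Tx flow is two-step: a single qed_ll2_prepare_tx_packet() call that covers the first fragment and declares the total BD count, followed by one qed_ll2_set_fragment_of_tx_packet() call per remaining fragment. A hedged sketch (example_ll2_tx() and its argument layout are illustrative; the entry points are the ones declared in this header):

#include <linux/string.h>
#include "qed_ll2.h"	/* qed-internal prototypes */

static int example_ll2_tx(void *cxt, u8 handle, dma_addr_t hdr, u16 hdr_len,
			  int nseg, dma_addr_t *frag, u16 *frag_len,
			  void *cookie)
{
	struct qed_ll2_tx_pkt_info pkt;
	int rc, i;

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nseg;	/* first fragment BD + payload BDs */
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = hdr;
	pkt.first_frag_len = hdr_len;
	pkt.cookie = cookie;	/* handed back in the Tx completion callback */

	rc = qed_ll2_prepare_tx_packet(cxt, handle, &pkt, true);
	if (rc)
		return rc;

	for (i = 0; i < nseg; i++) {
		rc = qed_ll2_set_fragment_of_tx_packet(cxt, handle,
						       frag[i], frag_len[i]);
		/* on failure the partial packet still completes via callback */
		if (rc)
			return rc;
	}

	return 0;
}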
* - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle * @param pkt - info regarding the tx packet * @param notify_fw - issue doorbell to fw for this packet * * @return 0 on success, failure otherwise */ -int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, +int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, struct qed_ll2_tx_pkt_info *pkt, bool notify_fw); @@ -191,18 +191,18 @@ int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, * @brief qed_ll2_release_connection - releases resources * allocated for LL2 connection * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle obtained from * qed_ll2_require_connection */ -void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); +void qed_ll2_release_connection(void *cxt, u8 connection_handle); /** * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill * Tx BD of BDs requested by * qed_ll2_prepare_tx_packet * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle * obtained from * qed_ll2_require_connection @@ -211,7 +211,7 @@ void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); * * @return 0 on success, failure otherwise */ -int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, +int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes); @@ -219,27 +219,27 @@ int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, * @brief qed_ll2_terminate_connection - stops Tx/Rx queues * * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle * obtained from * qed_ll2_require_connection * * @return 0 on success, failure otherwise */ -int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); +int qed_ll2_terminate_connection(void *cxt, u8 connection_handle); /** * @brief qed_ll2_get_stats - get LL2 queue's statistics * * - * @param p_hwfn + * @param cxt - pointer to the hw-function [opaque to some] * @param connection_handle LL2 connection's handle obtained from * qed_ll2_require_connection * @param p_stats * * @return 0 on success, failure otherwise */ -int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, +int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats); /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 33abcf110260..4bc2f6c47f69 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include @@ -65,6 +64,7 @@ #include "qed_sp.h" #include "qed_roce.h" #include "qed_ll2.h" +#include static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); @@ -2709,310 +2709,35 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } -void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet) -{ - struct qed_roce_ll2_packet *packet = cookie; - struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2; - - roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet); -} - -void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, 
- dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet) -{ - qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle, - cookie, first_frag_addr, - b_last_fragment, b_last_packet); -} - -void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t rx_buf_addr, - u16 data_length, - u8 data_length_error, - u16 parse_flags, - u16 vlan, - u32 src_mac_addr_hi, - u16 src_mac_addr_lo, bool b_last_packet) -{ - struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2; - struct qed_roce_ll2_rx_params params; - struct qed_dev *cdev = p_hwfn->cdev; - struct qed_roce_ll2_packet pkt; - - DP_VERBOSE(cdev, - QED_MSG_LL2, - "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n", - (void *)(uintptr_t)rx_buf_addr, - data_length, data_length_error); - - memset(&pkt, 0, sizeof(pkt)); - pkt.n_seg = 1; - pkt.payload[0].baddr = rx_buf_addr; - pkt.payload[0].len = data_length; - - memset(¶ms, 0, sizeof(params)); - params.vlan_id = vlan; - *((u32 *)¶ms.smac[0]) = ntohl(src_mac_addr_hi); - *((u16 *)¶ms.smac[4]) = ntohs(src_mac_addr_lo); - - if (data_length_error) { - DP_ERR(cdev, - "roce ll2 rx complete: data length error %d, length=%d\n", - data_length_error, data_length); - params.rc = -EINVAL; - } - - roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, ¶ms); -} - static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, u8 *old_mac_address, u8 *new_mac_address) { - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_ptt *p_ptt; int rc = 0; - if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) { - DP_ERR(cdev, - "qed roce mac filter failed - roce_info/ll2 NULL\n"); - return -EINVAL; - } - - p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + p_ptt = qed_ptt_acquire(p_hwfn); if (!p_ptt) { DP_ERR(cdev, "qed roce ll2 mac filter set: failed to acquire PTT\n"); return -EINVAL; } - mutex_lock(&hwfn->ll2->lock); if (old_mac_address) - qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, - old_mac_address); + qed_llh_remove_mac_filter(p_hwfn, p_ptt, old_mac_address); if (new_mac_address) - rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, - new_mac_address); - mutex_unlock(&hwfn->ll2->lock); - - qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); - - if (rc) - DP_ERR(cdev, - "qed roce ll2 mac filter set: failed to add mac filter\n"); - - return rc; -} - -static int qed_roce_ll2_start(struct qed_dev *cdev, - struct qed_roce_ll2_params *params) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_roce_ll2_info *roce_ll2; - struct qed_ll2_acquire_data data; - int rc; - - if (!params) { - DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n"); - return -EINVAL; - } - if (!params->cbs.tx_cb || !params->cbs.rx_cb) { - DP_ERR(cdev, - "qed roce ll2 start: failed due to NULL tx/rx. 
tx_cb=%p, rx_cb=%p\n", - params->cbs.tx_cb, params->cbs.rx_cb); - return -EINVAL; - } - if (!is_valid_ether_addr(params->mac_address)) { - DP_ERR(cdev, - "qed roce ll2 start: failed due to invalid Ethernet address %pM\n", - params->mac_address); - return -EINVAL; - } - - /* Initialize */ - roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC); - if (!roce_ll2) { - DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n"); - return -ENOMEM; - } + rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, new_mac_address); - roce_ll2->handle = QED_LL2_UNUSED_HANDLE; - roce_ll2->cbs = params->cbs; - roce_ll2->cb_cookie = params->cb_cookie; - mutex_init(&roce_ll2->lock); - - memset(&data, 0, sizeof(data)); - data.input.conn_type = QED_LL2_TYPE_ROCE; - data.input.mtu = params->mtu; - data.input.rx_num_desc = params->max_rx_buffers; - data.input.tx_num_desc = params->max_tx_buffers; - data.input.rx_drop_ttl0_flg = true; - data.input.rx_vlan_removal_en = false; - data.input.tx_dest = QED_LL2_TX_DEST_NW; - data.input.ai_err_packet_too_big = LL2_DROP_PACKET; - data.input.ai_err_no_buf = LL2_DROP_PACKET; - data.p_connection_handle = &roce_ll2->handle; - data.input.gsi_enable = true; - - rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data); - if (rc) { - DP_ERR(cdev, - "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n", - rc); - goto err; - } - - rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev), - roce_ll2->handle); - if (rc) { - DP_ERR(cdev, - "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n", - rc); - goto err1; - } - - hwfn->ll2 = roce_ll2; - - rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address); - if (rc) { - hwfn->ll2 = NULL; - goto err2; - } - ether_addr_copy(roce_ll2->mac_address, params->mac_address); - - return 0; - -err2: - qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle); -err1: - qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle); -err: - kfree(roce_ll2); - return rc; -} - -static int qed_roce_ll2_stop(struct qed_dev *cdev) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; - int rc; - - if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) { - DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n"); - return -EINVAL; - } - - /* remove LL2 MAC address filter */ - rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL); - eth_zero_addr(roce_ll2->mac_address); + qed_ptt_release(p_hwfn, p_ptt); - rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), - roce_ll2->handle); if (rc) DP_ERR(cdev, - "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n", - rc); - - qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle); - - roce_ll2->handle = QED_LL2_UNUSED_HANDLE; - - kfree(roce_ll2); + "qed roce ll2 mac filter set: failed to add MAC filter\n"); return rc; } -static int qed_roce_ll2_tx(struct qed_dev *cdev, - struct qed_roce_ll2_packet *pkt, - struct qed_roce_ll2_tx_params *params) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; - enum qed_ll2_roce_flavor_type qed_roce_flavor; - struct qed_ll2_tx_pkt_info ll2_pkt; - u8 flags = 0; - int rc; - int i; - - if (!pkt || !params) { - DP_ERR(cdev, - "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n", - cdev, pkt, params); - return -EINVAL; - } - - qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? 
QED_LL2_ROCE - : QED_LL2_RROCE; - - if (pkt->roce_mode == ROCE_V2_IPV4) - flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); - - /* Tx header */ - memset(&ll2_pkt, 0, sizeof(ll2_pkt)); - ll2_pkt.num_of_bds = 1 + pkt->n_seg; - ll2_pkt.bd_flags = flags; - ll2_pkt.tx_dest = QED_LL2_TX_DEST_NW; - ll2_pkt.qed_roce_flavor = qed_roce_flavor; - ll2_pkt.first_frag = pkt->header.baddr; - ll2_pkt.first_frag_len = pkt->header.len; - ll2_pkt.cookie = pkt; - - rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), - roce_ll2->handle, - &ll2_pkt, 1); - if (rc) { - DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc); - return QED_ROCE_TX_HEAD_FAILURE; - } - - /* Tx payload */ - for (i = 0; i < pkt->n_seg; i++) { - rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev), - roce_ll2->handle, - pkt->payload[i].baddr, - pkt->payload[i].len); - if (rc) { - /* If failed not much to do here, partial packet has - * been posted * we can't free memory, will need to wait - * for completion - */ - DP_ERR(cdev, - "roce ll2 tx: payload failed (rc=%d)\n", rc); - return QED_ROCE_TX_FRAG_FAILURE; - } - } - - return 0; -} - -static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev, - struct qed_roce_ll2_buffer *buf, - u64 cookie, u8 notify_fw) -{ - return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), - QED_LEADING_HWFN(cdev)->ll2->handle, - buf->baddr, buf->len, - (void *)(uintptr_t)cookie, notify_fw); -} - -static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats) -{ - struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); - struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2; - - return qed_ll2_get_stats(QED_LEADING_HWFN(cdev), - roce_ll2->handle, stats); -} - static const struct qed_rdma_ops qed_rdma_ops_pass = { .common = &qed_common_ops_pass, .fill_dev_info = &qed_fill_rdma_dev_info, @@ -3040,12 +2765,15 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { .rdma_free_tid = &qed_rdma_free_tid, .rdma_register_tid = &qed_rdma_register_tid, .rdma_deregister_tid = &qed_rdma_deregister_tid, - .roce_ll2_start = &qed_roce_ll2_start, - .roce_ll2_stop = &qed_roce_ll2_stop, - .roce_ll2_tx = &qed_roce_ll2_tx, - .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer, - .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, - .roce_ll2_stats = &qed_roce_ll2_stats, + .ll2_acquire_connection = &qed_ll2_acquire_connection, + .ll2_establish_connection = &qed_ll2_establish_connection, + .ll2_terminate_connection = &qed_ll2_terminate_connection, + .ll2_release_connection = &qed_ll2_release_connection, + .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, + .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, + .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, + .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, + .ll2_get_stats = &qed_ll2_get_stats, }; const struct qed_rdma_ops *qed_get_rdma_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 9742af516183..94be3b5a39c4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -170,53 +170,10 @@ struct qed_rdma_qp { void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, union rdma_eqe_data *rdma_data); -void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet); -void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 
connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, bool b_last_packet); -void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t rx_buf_addr, - u16 data_length, - u8 data_length_error, - u16 parse_flags, - u16 vlan, - u32 src_mac_addr_hi, - u16 src_mac_addr_lo, bool b_last_packet); #else static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, union rdma_eqe_data *rdma_data) {} -static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, - bool b_last_packet) {} -static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t first_frag_addr, - bool b_last_fragment, - bool b_last_packet) {} -static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn, - u8 connection_handle, - void *cookie, - dma_addr_t rx_buf_addr, - u16 data_length, - u8 data_length_error, - u16 parse_flags, - u16 vlan, - u32 src_mac_addr_hi, - u16 src_mac_addr_lo, - bool b_last_packet) {} #endif #endif diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index a1f63eca430a..5958b45eb699 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -122,6 +122,40 @@ struct qed_ll2_comp_rx_data { } u; }; +typedef +void (*qed_ll2_complete_rx_packet_cb)(void *cxt, + struct qed_ll2_comp_rx_data *data); + +typedef +void (*qed_ll2_release_rx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, + bool b_last_packet); + +typedef +void (*qed_ll2_complete_tx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, + bool b_last_packet); + +typedef +void (*qed_ll2_release_tx_packet_cb)(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet); + +struct qed_ll2_cbs { + qed_ll2_complete_rx_packet_cb rx_comp_cb; + qed_ll2_release_rx_packet_cb rx_release_cb; + qed_ll2_complete_tx_packet_cb tx_comp_cb; + qed_ll2_release_tx_packet_cb tx_release_cb; + void *cookie; +}; + struct qed_ll2_acquire_data_inputs { enum qed_ll2_conn_type conn_type; u16 mtu; @@ -140,6 +174,8 @@ struct qed_ll2_acquire_data_inputs { struct qed_ll2_acquire_data { struct qed_ll2_acquire_data_inputs input; + const struct qed_ll2_cbs *cbs; + /* Output container for LL2 connection's handle */ u8 *p_connection_handle; }; diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h index cbb2ff0ce4bc..8e70f5ee05af 100644 --- a/include/linux/qed/qed_roce_if.h +++ b/include/linux/qed/qed_roce_if.h @@ -34,8 +34,6 @@ #include #include #include -#include -#include #include #include #include @@ -491,42 +489,6 @@ struct qed_roce_ll2_packet { enum qed_roce_ll2_tx_dest tx_dest; }; -struct qed_roce_ll2_tx_params { - int reserved; -}; - -struct qed_roce_ll2_rx_params { - u16 vlan_id; - u8 smac[ETH_ALEN]; - int rc; -}; - -struct qed_roce_ll2_cbs { - void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt); - - void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt, - struct qed_roce_ll2_rx_params *params); -}; - -struct qed_roce_ll2_params { - u16 max_rx_buffers; - u16 max_tx_buffers; - u16 mtu; - u8 mac_address[ETH_ALEN]; - struct qed_roce_ll2_cbs cbs; - void 
*cb_cookie; -}; - -struct qed_roce_ll2_info { - u8 handle; - struct qed_roce_ll2_cbs cbs; - u8 mac_address[ETH_ALEN]; - void *cb_cookie; - - /* Lock to protect ll2 */ - struct mutex lock; -}; - enum qed_rdma_type { QED_RDMA_TYPE_ROCE, }; @@ -579,26 +541,40 @@ struct qed_rdma_ops { int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *oparams); int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp); + int (*rdma_register_tid)(void *rdma_cxt, struct qed_rdma_register_tid_in_params *iparams); + int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid); int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); void (*rdma_free_tid)(void *rdma_cxt, u32 itid); - int (*roce_ll2_start)(struct qed_dev *cdev, - struct qed_roce_ll2_params *params); - int (*roce_ll2_stop)(struct qed_dev *cdev); - int (*roce_ll2_tx)(struct qed_dev *cdev, - struct qed_roce_ll2_packet *packet, - struct qed_roce_ll2_tx_params *params); - int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev, - struct qed_roce_ll2_buffer *buf, - u64 cookie, u8 notify_fw); - int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev, - u8 *old_mac_address, - u8 *new_mac_address); - int (*roce_ll2_stats)(struct qed_dev *cdev, - struct qed_ll2_stats *stats); + + int (*ll2_acquire_connection)(void *rdma_cxt, + struct qed_ll2_acquire_data *data); + + int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle); + int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle); + void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle); + + int (*ll2_prepare_tx_packet)(void *rdma_cxt, + u8 connection_handle, + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw); + + int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt, + u8 connection_handle, + dma_addr_t addr, + u16 nbytes); + int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle, + dma_addr_t addr, u16 buf_len, void *cookie, + u8 notify_fw); + int (*ll2_get_stats)(void *rdma_cxt, + u8 connection_handle, + struct qed_ll2_stats *p_stats); + int (*ll2_set_mac_filter)(struct qed_dev *cdev, + u8 *old_mac_address, u8 *new_mac_address); + }; const struct qed_rdma_ops *qed_get_rdma_ops(void); -- cgit v1.2.3 From 1a4a69751f4d24ffd3530f5a9694636db1566a3b Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Tue, 20 Jun 2017 16:00:00 +0300 Subject: qed: Chain support for external PBL iWARP would require the chains to allocate/free their PBL memory independently, so add the infrastructure to provide it externally. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/qedr/main.c | 2 +- drivers/infiniband/hw/qedr/verbs.c | 6 ++--- drivers/net/ethernet/qlogic/qed/qed_dev.c | 35 ++++++++++++++++++++------- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 5 +++- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 6 ++--- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 6 ++--- drivers/net/ethernet/qlogic/qed/qed_spq.c | 6 ++--- drivers/net/ethernet/qlogic/qede/qede_main.c | 8 +++--- include/linux/qed/qed_chain.h | 7 ++++++ include/linux/qed/qed_if.h | 3 ++- 10 files changed, 56 insertions(+), 28 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 485c1fef238b..5a32b802e4da 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -276,7 +276,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) QED_CHAIN_CNT_TYPE_U16, n_entries, sizeof(struct regpair *), - &cnq->pbl); + &cnq->pbl, NULL); if (rc) goto err4; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 17685cfea6a2..80df89b5a9ce 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -925,7 +925,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev, QED_CHAIN_CNT_TYPE_U32, chain_entries, sizeof(union rdma_cqe), - &cq->pbl); + &cq->pbl, NULL); if (rc) goto err1; @@ -1413,7 +1413,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev, QED_CHAIN_CNT_TYPE_U32, n_sq_elems, QEDR_SQE_ELEMENT_SIZE, - &qp->sq.pbl); + &qp->sq.pbl, NULL); if (rc) return rc; @@ -1427,7 +1427,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev, QED_CHAIN_CNT_TYPE_U32, n_rq_elems, QEDR_RQE_ELEMENT_SIZE, - &qp->rq.pbl); + &qp->rq.pbl, NULL); if (rc) return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 65fe4940f20d..8b140541736a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3075,12 +3075,15 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) } pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - dma_free_coherent(&cdev->pdev->dev, - pbl_size, - p_chain->pbl_sp.p_virt_table, - p_chain->pbl_sp.p_phys_table); + + if (!p_chain->b_external_pbl) + dma_free_coherent(&cdev->pdev->dev, + pbl_size, + p_chain->pbl_sp.p_virt_table, + p_chain->pbl_sp.p_phys_table); out: vfree(p_chain->pbl.pp_virt_addr_tbl); + p_chain->pbl.pp_virt_addr_tbl = NULL; } void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) @@ -3174,7 +3177,10 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) return 0; } -static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) +static int +qed_chain_alloc_pbl(struct qed_dev *cdev, + struct qed_chain *p_chain, + struct qed_chain_ext_pbl *ext_pbl) { u32 page_cnt = p_chain->page_cnt, size, i; dma_addr_t p_phys = 0, p_pbl_phys = 0; @@ -3194,8 +3200,16 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) * should be saved to allow its freeing during the error flow. 
*/ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, - size, &p_pbl_phys, GFP_KERNEL); + + if (!ext_pbl) { + p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, + size, &p_pbl_phys, GFP_KERNEL); + } else { + p_pbl_virt = ext_pbl->p_pbl_virt; + p_pbl_phys = ext_pbl->p_pbl_phys; + p_chain->b_external_pbl = true; + } + qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_virt_addr_tbl); if (!p_pbl_virt) @@ -3228,7 +3242,10 @@ int qed_chain_alloc(struct qed_dev *cdev, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, enum qed_chain_cnt_type cnt_type, - u32 num_elems, size_t elem_size, struct qed_chain *p_chain) + u32 num_elems, + size_t elem_size, + struct qed_chain *p_chain, + struct qed_chain_ext_pbl *ext_pbl) { u32 page_cnt; int rc = 0; @@ -3259,7 +3276,7 @@ int qed_chain_alloc(struct qed_dev *cdev, rc = qed_chain_alloc_single(cdev, p_chain); break; case QED_CHAIN_MODE_PBL: - rc = qed_chain_alloc_pbl(cdev, p_chain); + rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl); break; } if (rc) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 12d16c096e36..1f1df1bf127c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -307,6 +307,7 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, * @param num_elems * @param elem_size * @param p_chain + * @param ext_pbl - a possible external PBL * * @return int */ @@ -315,7 +316,9 @@ qed_chain_alloc(struct qed_dev *cdev, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, enum qed_chain_cnt_type cnt_type, - u32 num_elems, size_t elem_size, struct qed_chain *p_chain); + u32 num_elems, + size_t elem_size, + struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl); /** * @brief qed_chain_free - Free chain DMA memory diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 6103723d7118..5a1ed05d0c5f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -752,7 +752,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, - r2tq_num_elements, 0x80, &p_conn->r2tq); + r2tq_num_elements, 0x80, &p_conn->r2tq, NULL); if (rc) goto nomem_r2tq; @@ -763,7 +763,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, uhq_num_elements, - sizeof(struct iscsi_uhqe), &p_conn->uhq); + sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL); if (rc) goto nomem_uhq; @@ -773,7 +773,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, xhq_num_elements, - sizeof(struct iscsi_xhqe), &p_conn->xhq); + sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL); if (rc) goto nomem; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 0e26193156e4..f9c51cb8585e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1056,7 +1056,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, QED_CHAIN_CNT_TYPE_U16, p_ll2_info->input.rx_num_desc, sizeof(struct core_rx_bd), - &p_ll2_info->rx_queue.rxq_chain); + &p_ll2_info->rx_queue.rxq_chain, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n"); goto out; @@ -1078,7 +1078,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, 
QED_CHAIN_CNT_TYPE_U16, p_ll2_info->input.rx_num_desc, sizeof(struct core_rx_fast_path_cqe), - &p_ll2_info->rx_queue.rcq_chain); + &p_ll2_info->rx_queue.rcq_chain, NULL); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); goto out; @@ -1108,7 +1108,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, QED_CHAIN_CNT_TYPE_U16, p_ll2_info->input.tx_num_desc, sizeof(struct core_tx_bd), - &p_ll2_info->tx_queue.txq_chain); + &p_ll2_info->tx_queue.txq_chain, NULL); if (rc) goto out; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index dede73f41e61..a971916af7cc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -419,7 +419,7 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) QED_CHAIN_CNT_TYPE_U16, num_elem, sizeof(union event_ring_element), - &p_eq->chain)) + &p_eq->chain, NULL)) goto eq_allocate_fail; /* register EQ completion on the SP SB */ @@ -547,7 +547,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) QED_CHAIN_CNT_TYPE_U16, 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), - &p_spq->chain)) + &p_spq->chain, NULL)) goto spq_allocate_fail; /* allocate and fill the SPQ elements (incl. ramrod data list) */ @@ -953,7 +953,7 @@ int qed_consq_alloc(struct qed_hwfn *p_hwfn) QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_PAGE_SIZE / 0x80, - 0x80, &p_consq->chain)) + 0x80, &p_consq->chain, NULL)) goto consq_allocate_fail; p_hwfn->p_consq = p_consq; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index fdf04bc5406e..37ad79917770 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1317,8 +1317,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) QED_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(struct eth_rx_bd), - &rxq->rx_bd_ring); - + &rxq->rx_bd_ring, NULL); if (rc) goto err; @@ -1329,7 +1328,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) QED_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(union eth_rx_cqe), - &rxq->rx_comp_ring); + &rxq->rx_comp_ring, NULL); if (rc) goto err; @@ -1387,7 +1386,8 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16, txq->num_tx_buffers, - sizeof(*p_virt), &txq->tx_pbl); + sizeof(*p_virt), + &txq->tx_pbl, NULL); if (rc) goto err; diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 5cd7a4608c9b..59ddf9af909e 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -80,6 +80,11 @@ struct qed_chain_pbl_u32 { u32 cons_page_idx; }; +struct qed_chain_ext_pbl { + dma_addr_t p_pbl_phys; + void *p_pbl_virt; +}; + struct qed_chain_u16 { /* Cyclic index of next element to produce/consme */ u16 prod_idx; @@ -155,6 +160,8 @@ struct qed_chain { u32 size; u8 intended_use; + + bool b_external_pbl; }; #define QED_CHAIN_PBL_ENTRY_SIZE (8) diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 74f6b99754aa..ef39c7f40ae6 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -634,7 +634,8 @@ struct qed_common_ops { enum qed_chain_cnt_type cnt_type, u32 num_elems, size_t elem_size, - struct qed_chain *p_chain); + struct qed_chain *p_chain, + struct qed_chain_ext_pbl *ext_pbl); void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); -- cgit v1.2.3 From 
b262a06e642cfb1eeb6c2c772f76dad674ada57e Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Tue, 20 Jun 2017 16:00:03 +0300 Subject: qed*: qede_roce.[ch] -> qede_rdma.[ch] Once we have iWARP support, the qede portion of the qedr<->qede interface would serve all the RDMA protocols - so rename the file to be appropriate to its function. While we're at it, we also move a couple of its inclusions into .h files and add includes to make sure it contains all the type definitions it requires. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/infiniband/hw/qedr/main.c | 2 +- drivers/infiniband/hw/qedr/qedr.h | 2 +- drivers/net/ethernet/qlogic/qede/Makefile | 2 +- drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_main.c | 1 - drivers/net/ethernet/qlogic/qede/qede_rdma.c | 314 +++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qede/qede_roce.c | 314 --------------------------- include/linux/qed/qede_rdma.h | 93 ++++++++ include/linux/qed/qede_roce.h | 88 -------- 9 files changed, 411 insertions(+), 406 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qede/qede_rdma.c delete mode 100644 drivers/net/ethernet/qlogic/qede/qede_roce.c create mode 100644 include/linux/qed/qede_rdma.h delete mode 100644 include/linux/qed/qede_roce.h (limited to 'include/linux/qed') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 5a32b802e4da..714eb0c92312 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -37,7 +37,7 @@ #include #include #include -#include + #include #include #include "qedr.h" diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 80333ec2c8b6..2376019a48ae 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -37,7 +37,7 @@ #include #include #include -#include +#include #include #include "qedr_hsi_rdma.h" diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index bc5f7c3b277d..75408fbb7680 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o qede-$(CONFIG_DCB) += qede_dcbnl.o -qede-$(CONFIG_QED_RDMA) += qede_roce.o +qede-$(CONFIG_QED_RDMA) += qede_rdma.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 694c09b8997e..2d6b30c47b3a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -40,6 +40,7 @@ #include #include #include +#include #include #ifdef CONFIG_RFS_ACCEL #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 37ad79917770..e9eaa38895e6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -60,7 +60,6 @@ #include #include #include -#include #include "qede.h" #include "qede_ptp.h" diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c new file mode 100644 index 000000000000..9837ee20cbae --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -0,0 +1,314 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include +#include +#include +#include "qede.h" + +static struct qedr_driver *qedr_drv; +static LIST_HEAD(qedr_dev_list); +static DEFINE_MUTEX(qedr_dev_list_lock); + +bool qede_roce_supported(struct qede_dev *dev) +{ + return dev->dev_info.common.rdma_supported; +} + +static void _qede_roce_dev_add(struct qede_dev *edev) +{ + if (!qedr_drv) + return; + + edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev, + edev->ndev); +} + +static int qede_roce_create_wq(struct qede_dev *edev) +{ + INIT_LIST_HEAD(&edev->rdma_info.roce_event_list); + edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq"); + if (!edev->rdma_info.roce_wq) { + DP_NOTICE(edev, "qedr: Could not create workqueue\n"); + return -ENOMEM; + } + + return 0; +} + +static void qede_roce_cleanup_event(struct qede_dev *edev) +{ + struct list_head *head = &edev->rdma_info.roce_event_list; + struct qede_roce_event_work *event_node; + + flush_workqueue(edev->rdma_info.roce_wq); + while (!list_empty(head)) { + event_node = list_entry(head->next, struct qede_roce_event_work, + list); + cancel_work_sync(&event_node->work); + list_del(&event_node->list); + kfree(event_node); + } +} + +static void qede_roce_destroy_wq(struct qede_dev *edev) +{ + qede_roce_cleanup_event(edev); + destroy_workqueue(edev->rdma_info.roce_wq); +} + +int qede_roce_dev_add(struct qede_dev *edev) +{ + int rc = 0; + + if (qede_roce_supported(edev)) { + rc = qede_roce_create_wq(edev); + if (rc) + return rc; + + INIT_LIST_HEAD(&edev->rdma_info.entry); + mutex_lock(&qedr_dev_list_lock); + list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); + _qede_roce_dev_add(edev); + mutex_unlock(&qedr_dev_list_lock); + } + + return rc; +} + +static void _qede_roce_dev_remove(struct qede_dev *edev) +{ + if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev) + qedr_drv->remove(edev->rdma_info.qedr_dev); + edev->rdma_info.qedr_dev = NULL; +} + +void qede_roce_dev_remove(struct qede_dev *edev) +{ + if (!qede_roce_supported(edev)) + return; + + qede_roce_destroy_wq(edev); + mutex_lock(&qedr_dev_list_lock); + _qede_roce_dev_remove(edev); + list_del(&edev->rdma_info.entry); + mutex_unlock(&qedr_dev_list_lock); +} + +static void _qede_roce_dev_open(struct qede_dev *edev) +{ + if (qedr_drv && 
edev->rdma_info.qedr_dev && qedr_drv->notify) + qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP); +} + +static void qede_roce_dev_open(struct qede_dev *edev) +{ + if (!qede_roce_supported(edev)) + return; + + mutex_lock(&qedr_dev_list_lock); + _qede_roce_dev_open(edev); + mutex_unlock(&qedr_dev_list_lock); +} + +static void _qede_roce_dev_close(struct qede_dev *edev) +{ + if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) + qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN); +} + +static void qede_roce_dev_close(struct qede_dev *edev) +{ + if (!qede_roce_supported(edev)) + return; + + mutex_lock(&qedr_dev_list_lock); + _qede_roce_dev_close(edev); + mutex_unlock(&qedr_dev_list_lock); +} + +static void qede_roce_dev_shutdown(struct qede_dev *edev) +{ + if (!qede_roce_supported(edev)) + return; + + mutex_lock(&qedr_dev_list_lock); + if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) + qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE); + mutex_unlock(&qedr_dev_list_lock); +} + +int qede_roce_register_driver(struct qedr_driver *drv) +{ + struct qede_dev *edev; + u8 qedr_counter = 0; + + mutex_lock(&qedr_dev_list_lock); + if (qedr_drv) { + mutex_unlock(&qedr_dev_list_lock); + return -EINVAL; + } + qedr_drv = drv; + + list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { + struct net_device *ndev; + + qedr_counter++; + _qede_roce_dev_add(edev); + ndev = edev->ndev; + if (netif_running(ndev) && netif_oper_up(ndev)) + _qede_roce_dev_open(edev); + } + mutex_unlock(&qedr_dev_list_lock); + + pr_notice("qedr: discovered and registered %d RoCE funcs\n", + qedr_counter); + + return 0; +} +EXPORT_SYMBOL(qede_roce_register_driver); + +void qede_roce_unregister_driver(struct qedr_driver *drv) +{ + struct qede_dev *edev; + + mutex_lock(&qedr_dev_list_lock); + list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { + if (edev->rdma_info.qedr_dev) + _qede_roce_dev_remove(edev); + } + qedr_drv = NULL; + mutex_unlock(&qedr_dev_list_lock); +} +EXPORT_SYMBOL(qede_roce_unregister_driver); + +static void qede_roce_changeaddr(struct qede_dev *edev) +{ + if (!qede_roce_supported(edev)) + return; + + if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) + qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR); +} + +static struct qede_roce_event_work * +qede_roce_get_free_event_node(struct qede_dev *edev) +{ + struct qede_roce_event_work *event_node = NULL; + struct list_head *list_node = NULL; + bool found = false; + + list_for_each(list_node, &edev->rdma_info.roce_event_list) { + event_node = list_entry(list_node, struct qede_roce_event_work, + list); + if (!work_pending(&event_node->work)) { + found = true; + break; + } + } + + if (!found) { + event_node = kzalloc(sizeof(*event_node), GFP_KERNEL); + if (!event_node) { + DP_NOTICE(edev, + "qedr: Could not allocate memory for roce work\n"); + return NULL; + } + list_add_tail(&event_node->list, + &edev->rdma_info.roce_event_list); + } + + return event_node; +} + +static void qede_roce_handle_event(struct work_struct *work) +{ + struct qede_roce_event_work *event_node; + enum qede_roce_event event; + struct qede_dev *edev; + + event_node = container_of(work, struct qede_roce_event_work, work); + event = event_node->event; + edev = event_node->ptr; + + switch (event) { + case QEDE_UP: + qede_roce_dev_open(edev); + break; + case QEDE_DOWN: + qede_roce_dev_close(edev); + break; + case QEDE_CLOSE: + qede_roce_dev_shutdown(edev); + break; + case QEDE_CHANGE_ADDR: + qede_roce_changeaddr(edev); + break; + default: + 
DP_NOTICE(edev, "Invalid roce event %d", event); + } +} + +static void qede_roce_add_event(struct qede_dev *edev, + enum qede_roce_event event) +{ + struct qede_roce_event_work *event_node; + + if (!edev->rdma_info.qedr_dev) + return; + + event_node = qede_roce_get_free_event_node(edev); + if (!event_node) + return; + + event_node->event = event; + event_node->ptr = edev; + + INIT_WORK(&event_node->work, qede_roce_handle_event); + queue_work(edev->rdma_info.roce_wq, &event_node->work); +} + +void qede_roce_dev_event_open(struct qede_dev *edev) +{ + qede_roce_add_event(edev, QEDE_UP); +} + +void qede_roce_dev_event_close(struct qede_dev *edev) +{ + qede_roce_add_event(edev, QEDE_DOWN); +} + +void qede_roce_event_changeaddr(struct qede_dev *edev) +{ + qede_roce_add_event(edev, QEDE_CHANGE_ADDR); +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c deleted file mode 100644 index c0030fb8d842..000000000000 --- a/drivers/net/ethernet/qlogic/qede/qede_roce.c +++ /dev/null @@ -1,314 +0,0 @@ -/* QLogic qedr NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#include -#include -#include -#include -#include -#include "qede.h" - -static struct qedr_driver *qedr_drv; -static LIST_HEAD(qedr_dev_list); -static DEFINE_MUTEX(qedr_dev_list_lock); - -bool qede_roce_supported(struct qede_dev *dev) -{ - return dev->dev_info.common.rdma_supported; -} - -static void _qede_roce_dev_add(struct qede_dev *edev) -{ - if (!qedr_drv) - return; - - edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev, - edev->ndev); -} - -static int qede_roce_create_wq(struct qede_dev *edev) -{ - INIT_LIST_HEAD(&edev->rdma_info.roce_event_list); - edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq"); - if (!edev->rdma_info.roce_wq) { - DP_NOTICE(edev, "qedr: Could not create workqueue\n"); - return -ENOMEM; - } - - return 0; -} - -static void qede_roce_cleanup_event(struct qede_dev *edev) -{ - struct list_head *head = &edev->rdma_info.roce_event_list; - struct qede_roce_event_work *event_node; - - flush_workqueue(edev->rdma_info.roce_wq); - while (!list_empty(head)) { - event_node = list_entry(head->next, struct qede_roce_event_work, - list); - cancel_work_sync(&event_node->work); - list_del(&event_node->list); - kfree(event_node); - } -} - -static void qede_roce_destroy_wq(struct qede_dev *edev) -{ - qede_roce_cleanup_event(edev); - destroy_workqueue(edev->rdma_info.roce_wq); -} - -int qede_roce_dev_add(struct qede_dev *edev) -{ - int rc = 0; - - if (qede_roce_supported(edev)) { - rc = qede_roce_create_wq(edev); - if (rc) - return rc; - - INIT_LIST_HEAD(&edev->rdma_info.entry); - mutex_lock(&qedr_dev_list_lock); - list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); - _qede_roce_dev_add(edev); - mutex_unlock(&qedr_dev_list_lock); - } - - return rc; -} - -static void _qede_roce_dev_remove(struct qede_dev *edev) -{ - if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev) - qedr_drv->remove(edev->rdma_info.qedr_dev); - edev->rdma_info.qedr_dev = NULL; -} - -void qede_roce_dev_remove(struct qede_dev *edev) -{ - if (!qede_roce_supported(edev)) - return; - - qede_roce_destroy_wq(edev); - mutex_lock(&qedr_dev_list_lock); - _qede_roce_dev_remove(edev); - list_del(&edev->rdma_info.entry); - mutex_unlock(&qedr_dev_list_lock); -} - -static void _qede_roce_dev_open(struct qede_dev *edev) -{ - if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) - qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP); -} - -static void qede_roce_dev_open(struct qede_dev *edev) -{ - if (!qede_roce_supported(edev)) - return; - - mutex_lock(&qedr_dev_list_lock); - _qede_roce_dev_open(edev); - mutex_unlock(&qedr_dev_list_lock); -} - -static void _qede_roce_dev_close(struct qede_dev *edev) -{ - if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) - qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN); -} - -static void qede_roce_dev_close(struct qede_dev *edev) -{ - if (!qede_roce_supported(edev)) - return; - - mutex_lock(&qedr_dev_list_lock); - _qede_roce_dev_close(edev); - mutex_unlock(&qedr_dev_list_lock); -} - -static void qede_roce_dev_shutdown(struct qede_dev *edev) -{ - if (!qede_roce_supported(edev)) - return; - - mutex_lock(&qedr_dev_list_lock); - if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) - qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE); - mutex_unlock(&qedr_dev_list_lock); -} - -int qede_roce_register_driver(struct qedr_driver *drv) -{ - struct qede_dev *edev; - u8 qedr_counter = 0; - - mutex_lock(&qedr_dev_list_lock); - if (qedr_drv) { - mutex_unlock(&qedr_dev_list_lock); - return -EINVAL; - } - qedr_drv = drv; - - 
list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { - struct net_device *ndev; - - qedr_counter++; - _qede_roce_dev_add(edev); - ndev = edev->ndev; - if (netif_running(ndev) && netif_oper_up(ndev)) - _qede_roce_dev_open(edev); - } - mutex_unlock(&qedr_dev_list_lock); - - pr_notice("qedr: discovered and registered %d RoCE funcs\n", - qedr_counter); - - return 0; -} -EXPORT_SYMBOL(qede_roce_register_driver); - -void qede_roce_unregister_driver(struct qedr_driver *drv) -{ - struct qede_dev *edev; - - mutex_lock(&qedr_dev_list_lock); - list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { - if (edev->rdma_info.qedr_dev) - _qede_roce_dev_remove(edev); - } - qedr_drv = NULL; - mutex_unlock(&qedr_dev_list_lock); -} -EXPORT_SYMBOL(qede_roce_unregister_driver); - -static void qede_roce_changeaddr(struct qede_dev *edev) -{ - if (!qede_roce_supported(edev)) - return; - - if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) - qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR); -} - -static struct qede_roce_event_work * -qede_roce_get_free_event_node(struct qede_dev *edev) -{ - struct qede_roce_event_work *event_node = NULL; - struct list_head *list_node = NULL; - bool found = false; - - list_for_each(list_node, &edev->rdma_info.roce_event_list) { - event_node = list_entry(list_node, struct qede_roce_event_work, - list); - if (!work_pending(&event_node->work)) { - found = true; - break; - } - } - - if (!found) { - event_node = kzalloc(sizeof(*event_node), GFP_KERNEL); - if (!event_node) { - DP_NOTICE(edev, - "qedr: Could not allocate memory for roce work\n"); - return NULL; - } - list_add_tail(&event_node->list, - &edev->rdma_info.roce_event_list); - } - - return event_node; -} - -static void qede_roce_handle_event(struct work_struct *work) -{ - struct qede_roce_event_work *event_node; - enum qede_roce_event event; - struct qede_dev *edev; - - event_node = container_of(work, struct qede_roce_event_work, work); - event = event_node->event; - edev = event_node->ptr; - - switch (event) { - case QEDE_UP: - qede_roce_dev_open(edev); - break; - case QEDE_DOWN: - qede_roce_dev_close(edev); - break; - case QEDE_CLOSE: - qede_roce_dev_shutdown(edev); - break; - case QEDE_CHANGE_ADDR: - qede_roce_changeaddr(edev); - break; - default: - DP_NOTICE(edev, "Invalid roce event %d", event); - } -} - -static void qede_roce_add_event(struct qede_dev *edev, - enum qede_roce_event event) -{ - struct qede_roce_event_work *event_node; - - if (!edev->rdma_info.qedr_dev) - return; - - event_node = qede_roce_get_free_event_node(edev); - if (!event_node) - return; - - event_node->event = event; - event_node->ptr = edev; - - INIT_WORK(&event_node->work, qede_roce_handle_event); - queue_work(edev->rdma_info.roce_wq, &event_node->work); -} - -void qede_roce_dev_event_open(struct qede_dev *edev) -{ - qede_roce_add_event(edev, QEDE_UP); -} - -void qede_roce_dev_event_close(struct qede_dev *edev) -{ - qede_roce_add_event(edev, QEDE_DOWN); -} - -void qede_roce_event_changeaddr(struct qede_dev *edev) -{ - qede_roce_add_event(edev, QEDE_CHANGE_ADDR); -} diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h new file mode 100644 index 000000000000..a1a9b81f7612 --- /dev/null +++ b/include/linux/qed/qede_rdma.h @@ -0,0 +1,93 @@ +/* QLogic qedr NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef QEDE_ROCE_H +#define QEDE_ROCE_H + +#include +#include +#include +#include + +struct qedr_dev; +struct qed_dev; +struct qede_dev; + +enum qede_roce_event { + QEDE_UP, + QEDE_DOWN, + QEDE_CHANGE_ADDR, + QEDE_CLOSE +}; + +struct qede_roce_event_work { + struct list_head list; + struct work_struct work; + void *ptr; + enum qede_roce_event event; +}; + +struct qedr_driver { + unsigned char name[32]; + + struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *, + struct net_device *); + + void (*remove)(struct qedr_dev *); + void (*notify)(struct qedr_dev *, enum qede_roce_event); +}; + +/* APIs for RoCE driver to register callback handlers, + * which will be invoked when device is added, removed, ifup, ifdown + */ +int qede_roce_register_driver(struct qedr_driver *drv); +void qede_roce_unregister_driver(struct qedr_driver *drv); + +bool qede_roce_supported(struct qede_dev *dev); + +#if IS_ENABLED(CONFIG_QED_RDMA) +int qede_roce_dev_add(struct qede_dev *dev); +void qede_roce_dev_event_open(struct qede_dev *dev); +void qede_roce_dev_event_close(struct qede_dev *dev); +void qede_roce_dev_remove(struct qede_dev *dev); +void qede_roce_event_changeaddr(struct qede_dev *qedr); +#else +static inline int qede_roce_dev_add(struct qede_dev *dev) +{ + return 0; +} + +static inline void qede_roce_dev_event_open(struct qede_dev *dev) {} +static inline void qede_roce_dev_event_close(struct qede_dev *dev) {} +static inline void qede_roce_dev_remove(struct qede_dev *dev) {} +static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {} +#endif +#endif diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h deleted file mode 100644 index 3b8dd551a98c..000000000000 --- a/include/linux/qed/qede_roce.h +++ /dev/null @@ -1,88 +0,0 @@ -/* QLogic qedr NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ -#ifndef QEDE_ROCE_H -#define QEDE_ROCE_H - -struct qedr_dev; -struct qed_dev; -struct qede_dev; - -enum qede_roce_event { - QEDE_UP, - QEDE_DOWN, - QEDE_CHANGE_ADDR, - QEDE_CLOSE -}; - -struct qede_roce_event_work { - struct list_head list; - struct work_struct work; - void *ptr; - enum qede_roce_event event; -}; - -struct qedr_driver { - unsigned char name[32]; - - struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *, - struct net_device *); - - void (*remove)(struct qedr_dev *); - void (*notify)(struct qedr_dev *, enum qede_roce_event); -}; - -/* APIs for RoCE driver to register callback handlers, - * which will be invoked when device is added, removed, ifup, ifdown - */ -int qede_roce_register_driver(struct qedr_driver *drv); -void qede_roce_unregister_driver(struct qedr_driver *drv); - -bool qede_roce_supported(struct qede_dev *dev); - -#if IS_ENABLED(CONFIG_QED_RDMA) -int qede_roce_dev_add(struct qede_dev *dev); -void qede_roce_dev_event_open(struct qede_dev *dev); -void qede_roce_dev_event_close(struct qede_dev *dev); -void qede_roce_dev_remove(struct qede_dev *dev); -void qede_roce_event_changeaddr(struct qede_dev *qedr); -#else -static inline int qede_roce_dev_add(struct qede_dev *dev) -{ - return 0; -} - -static inline void qede_roce_dev_event_open(struct qede_dev *dev) {} -static inline void qede_roce_dev_event_close(struct qede_dev *dev) {} -static inline void qede_roce_dev_remove(struct qede_dev *dev) {} -static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {} -#endif -#endif -- cgit v1.2.3 From bbfcd1e8e1677b1e692144c5709945e1dfe1ed30 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Tue, 20 Jun 2017 16:00:04 +0300 Subject: qed*: Set rdma generic functions prefix Rename the functions common to both iWARP and RoCE to have a prefix of _rdma_ instead of _roce_. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/qedr/main.c | 6 +- drivers/net/ethernet/qlogic/qede/qede.h | 4 +- drivers/net/ethernet/qlogic/qede/qede_main.c | 12 +-- drivers/net/ethernet/qlogic/qede/qede_rdma.c | 142 +++++++++++++-------------- include/linux/qed/qede_rdma.h | 37 +++---- 5 files changed, 101 insertions(+), 100 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 714eb0c92312..b5851fd67d4f 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -902,7 +902,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) * initialization done before RoCE driver notifies * event to stack. */ -static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) +static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event) { switch (event) { case QEDE_UP: @@ -931,12 +931,12 @@ static struct qedr_driver qedr_drv = { static int __init qedr_init_module(void) { - return qede_roce_register_driver(&qedr_drv); + return qede_rdma_register_driver(&qedr_drv); } static void __exit qedr_exit_module(void) { - qede_roce_unregister_driver(&qedr_drv); + qede_rdma_unregister_driver(&qedr_drv); } module_init(qedr_init_module); diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 2d6b30c47b3a..4dfb238221f9 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -154,8 +154,8 @@ struct qede_vlan { struct qede_rdma_dev { struct qedr_dev *qedr_dev; struct list_head entry; - struct list_head roce_event_list; - struct workqueue_struct *roce_wq; + struct list_head rdma_event_list; + struct workqueue_struct *rdma_wq; }; struct qede_ptp; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index e9eaa38895e6..06ca13dd9ddb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -262,7 +262,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event, break; case NETDEV_CHANGEADDR: edev = netdev_priv(ndev); - qede_roce_event_changeaddr(edev); + qede_rdma_event_changeaddr(edev); break; } @@ -977,7 +977,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, qede_init_ndev(edev); - rc = qede_roce_dev_add(edev); + rc = qede_rdma_dev_add(edev); if (rc) goto err3; @@ -1013,7 +1013,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, return 0; err4: - qede_roce_dev_remove(edev); + qede_rdma_dev_remove(edev); err3: free_netdev(edev->ndev); err2: @@ -1064,7 +1064,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) qede_ptp_disable(edev); - qede_roce_dev_remove(edev); + qede_rdma_dev_remove(edev); edev->ops->common->set_power_state(cdev, PCI_D0); @@ -1964,7 +1964,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, edev->state = QEDE_STATE_CLOSED; - qede_roce_dev_event_close(edev); + qede_rdma_dev_event_close(edev); /* Close OS Tx */ netif_tx_disable(edev->ndev); @@ -2069,7 +2069,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, link_params.link_up = true; edev->ops->common->set_link(edev->cdev, &link_params); - qede_roce_dev_event_open(edev); + qede_rdma_dev_event_open(edev); edev->state = QEDE_STATE_OPEN; diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 9837ee20cbae..50b142fad6b8 100644 
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -40,12 +40,12 @@ static struct qedr_driver *qedr_drv; static LIST_HEAD(qedr_dev_list); static DEFINE_MUTEX(qedr_dev_list_lock); -bool qede_roce_supported(struct qede_dev *dev) +bool qede_rdma_supported(struct qede_dev *dev) { return dev->dev_info.common.rdma_supported; } -static void _qede_roce_dev_add(struct qede_dev *edev) +static void _qede_rdma_dev_add(struct qede_dev *edev) { if (!qedr_drv) return; @@ -54,11 +54,11 @@ static void _qede_roce_dev_add(struct qede_dev *edev) edev->ndev); } -static int qede_roce_create_wq(struct qede_dev *edev) +static int qede_rdma_create_wq(struct qede_dev *edev) { - INIT_LIST_HEAD(&edev->rdma_info.roce_event_list); - edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq"); - if (!edev->rdma_info.roce_wq) { + INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list); + edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq"); + if (!edev->rdma_info.rdma_wq) { DP_NOTICE(edev, "qedr: Could not create workqueue\n"); return -ENOMEM; } @@ -66,14 +66,14 @@ static int qede_roce_create_wq(struct qede_dev *edev) return 0; } -static void qede_roce_cleanup_event(struct qede_dev *edev) +static void qede_rdma_cleanup_event(struct qede_dev *edev) { - struct list_head *head = &edev->rdma_info.roce_event_list; - struct qede_roce_event_work *event_node; + struct list_head *head = &edev->rdma_info.rdma_event_list; + struct qede_rdma_event_work *event_node; - flush_workqueue(edev->rdma_info.roce_wq); + flush_workqueue(edev->rdma_info.rdma_wq); while (!list_empty(head)) { - event_node = list_entry(head->next, struct qede_roce_event_work, + event_node = list_entry(head->next, struct qede_rdma_event_work, list); cancel_work_sync(&event_node->work); list_del(&event_node->list); @@ -81,85 +81,85 @@ static void qede_roce_cleanup_event(struct qede_dev *edev) } } -static void qede_roce_destroy_wq(struct qede_dev *edev) +static void qede_rdma_destroy_wq(struct qede_dev *edev) { - qede_roce_cleanup_event(edev); - destroy_workqueue(edev->rdma_info.roce_wq); + qede_rdma_cleanup_event(edev); + destroy_workqueue(edev->rdma_info.rdma_wq); } -int qede_roce_dev_add(struct qede_dev *edev) +int qede_rdma_dev_add(struct qede_dev *edev) { int rc = 0; - if (qede_roce_supported(edev)) { - rc = qede_roce_create_wq(edev); + if (qede_rdma_supported(edev)) { + rc = qede_rdma_create_wq(edev); if (rc) return rc; INIT_LIST_HEAD(&edev->rdma_info.entry); mutex_lock(&qedr_dev_list_lock); list_add_tail(&edev->rdma_info.entry, &qedr_dev_list); - _qede_roce_dev_add(edev); + _qede_rdma_dev_add(edev); mutex_unlock(&qedr_dev_list_lock); } return rc; } -static void _qede_roce_dev_remove(struct qede_dev *edev) +static void _qede_rdma_dev_remove(struct qede_dev *edev) { if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev) qedr_drv->remove(edev->rdma_info.qedr_dev); edev->rdma_info.qedr_dev = NULL; } -void qede_roce_dev_remove(struct qede_dev *edev) +void qede_rdma_dev_remove(struct qede_dev *edev) { - if (!qede_roce_supported(edev)) + if (!qede_rdma_supported(edev)) return; - qede_roce_destroy_wq(edev); + qede_rdma_destroy_wq(edev); mutex_lock(&qedr_dev_list_lock); - _qede_roce_dev_remove(edev); + _qede_rdma_dev_remove(edev); list_del(&edev->rdma_info.entry); mutex_unlock(&qedr_dev_list_lock); } -static void _qede_roce_dev_open(struct qede_dev *edev) +static void _qede_rdma_dev_open(struct qede_dev *edev) { if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) 
qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP); } -static void qede_roce_dev_open(struct qede_dev *edev) +static void qede_rdma_dev_open(struct qede_dev *edev) { - if (!qede_roce_supported(edev)) + if (!qede_rdma_supported(edev)) return; mutex_lock(&qedr_dev_list_lock); - _qede_roce_dev_open(edev); + _qede_rdma_dev_open(edev); mutex_unlock(&qedr_dev_list_lock); } -static void _qede_roce_dev_close(struct qede_dev *edev) +static void _qede_rdma_dev_close(struct qede_dev *edev) { if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN); } -static void qede_roce_dev_close(struct qede_dev *edev) +static void qede_rdma_dev_close(struct qede_dev *edev) { - if (!qede_roce_supported(edev)) + if (!qede_rdma_supported(edev)) return; mutex_lock(&qedr_dev_list_lock); - _qede_roce_dev_close(edev); + _qede_rdma_dev_close(edev); mutex_unlock(&qedr_dev_list_lock); } -static void qede_roce_dev_shutdown(struct qede_dev *edev) +static void qede_rdma_dev_shutdown(struct qede_dev *edev) { - if (!qede_roce_supported(edev)) + if (!qede_rdma_supported(edev)) return; mutex_lock(&qedr_dev_list_lock); @@ -168,7 +168,7 @@ static void qede_roce_dev_shutdown(struct qede_dev *edev) mutex_unlock(&qedr_dev_list_lock); } -int qede_roce_register_driver(struct qedr_driver *drv) +int qede_rdma_register_driver(struct qedr_driver *drv) { struct qede_dev *edev; u8 qedr_counter = 0; @@ -184,52 +184,52 @@ int qede_roce_register_driver(struct qedr_driver *drv) struct net_device *ndev; qedr_counter++; - _qede_roce_dev_add(edev); + _qede_rdma_dev_add(edev); ndev = edev->ndev; if (netif_running(ndev) && netif_oper_up(ndev)) - _qede_roce_dev_open(edev); + _qede_rdma_dev_open(edev); } mutex_unlock(&qedr_dev_list_lock); - pr_notice("qedr: discovered and registered %d RoCE funcs\n", + pr_notice("qedr: discovered and registered %d RDMA funcs\n", qedr_counter); return 0; } -EXPORT_SYMBOL(qede_roce_register_driver); +EXPORT_SYMBOL(qede_rdma_register_driver); -void qede_roce_unregister_driver(struct qedr_driver *drv) +void qede_rdma_unregister_driver(struct qedr_driver *drv) { struct qede_dev *edev; mutex_lock(&qedr_dev_list_lock); list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) { if (edev->rdma_info.qedr_dev) - _qede_roce_dev_remove(edev); + _qede_rdma_dev_remove(edev); } qedr_drv = NULL; mutex_unlock(&qedr_dev_list_lock); } -EXPORT_SYMBOL(qede_roce_unregister_driver); +EXPORT_SYMBOL(qede_rdma_unregister_driver); -static void qede_roce_changeaddr(struct qede_dev *edev) +static void qede_rdma_changeaddr(struct qede_dev *edev) { - if (!qede_roce_supported(edev)) + if (!qede_rdma_supported(edev)) return; if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR); } -static struct qede_roce_event_work * -qede_roce_get_free_event_node(struct qede_dev *edev) +static struct qede_rdma_event_work * +qede_rdma_get_free_event_node(struct qede_dev *edev) { - struct qede_roce_event_work *event_node = NULL; + struct qede_rdma_event_work *event_node = NULL; struct list_head *list_node = NULL; bool found = false; - list_for_each(list_node, &edev->rdma_info.roce_event_list) { - event_node = list_entry(list_node, struct qede_roce_event_work, + list_for_each(list_node, &edev->rdma_info.rdma_event_list) { + event_node = list_entry(list_node, struct qede_rdma_event_work, list); if (!work_pending(&event_node->work)) { found = true; @@ -241,74 +241,74 @@ qede_roce_get_free_event_node(struct qede_dev *edev) event_node = 
kzalloc(sizeof(*event_node), GFP_KERNEL); if (!event_node) { DP_NOTICE(edev, - "qedr: Could not allocate memory for roce work\n"); + "qedr: Could not allocate memory for rdma work\n"); return NULL; } list_add_tail(&event_node->list, - &edev->rdma_info.roce_event_list); + &edev->rdma_info.rdma_event_list); } return event_node; } -static void qede_roce_handle_event(struct work_struct *work) +static void qede_rdma_handle_event(struct work_struct *work) { - struct qede_roce_event_work *event_node; - enum qede_roce_event event; + struct qede_rdma_event_work *event_node; + enum qede_rdma_event event; struct qede_dev *edev; - event_node = container_of(work, struct qede_roce_event_work, work); + event_node = container_of(work, struct qede_rdma_event_work, work); event = event_node->event; edev = event_node->ptr; switch (event) { case QEDE_UP: - qede_roce_dev_open(edev); + qede_rdma_dev_open(edev); break; case QEDE_DOWN: - qede_roce_dev_close(edev); + qede_rdma_dev_close(edev); break; case QEDE_CLOSE: - qede_roce_dev_shutdown(edev); + qede_rdma_dev_shutdown(edev); break; case QEDE_CHANGE_ADDR: - qede_roce_changeaddr(edev); + qede_rdma_changeaddr(edev); break; default: - DP_NOTICE(edev, "Invalid roce event %d", event); + DP_NOTICE(edev, "Invalid rdma event %d", event); } } -static void qede_roce_add_event(struct qede_dev *edev, - enum qede_roce_event event) +static void qede_rdma_add_event(struct qede_dev *edev, + enum qede_rdma_event event) { - struct qede_roce_event_work *event_node; + struct qede_rdma_event_work *event_node; if (!edev->rdma_info.qedr_dev) return; - event_node = qede_roce_get_free_event_node(edev); + event_node = qede_rdma_get_free_event_node(edev); if (!event_node) return; event_node->event = event; event_node->ptr = edev; - INIT_WORK(&event_node->work, qede_roce_handle_event); - queue_work(edev->rdma_info.roce_wq, &event_node->work); + INIT_WORK(&event_node->work, qede_rdma_handle_event); + queue_work(edev->rdma_info.rdma_wq, &event_node->work); } -void qede_roce_dev_event_open(struct qede_dev *edev) +void qede_rdma_dev_event_open(struct qede_dev *edev) { - qede_roce_add_event(edev, QEDE_UP); + qede_rdma_add_event(edev, QEDE_UP); } -void qede_roce_dev_event_close(struct qede_dev *edev) +void qede_rdma_dev_event_close(struct qede_dev *edev) { - qede_roce_add_event(edev, QEDE_DOWN); + qede_rdma_add_event(edev, QEDE_DOWN); } -void qede_roce_event_changeaddr(struct qede_dev *edev) +void qede_rdma_event_changeaddr(struct qede_dev *edev) { - qede_roce_add_event(edev, QEDE_CHANGE_ADDR); + qede_rdma_add_event(edev, QEDE_CHANGE_ADDR); } diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index a1a9b81f7612..1348a16e5e4b 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -41,18 +41,18 @@ struct qedr_dev; struct qed_dev; struct qede_dev; -enum qede_roce_event { +enum qede_rdma_event { QEDE_UP, QEDE_DOWN, QEDE_CHANGE_ADDR, QEDE_CLOSE }; -struct qede_roce_event_work { +struct qede_rdma_event_work { struct list_head list; struct work_struct work; void *ptr; - enum qede_roce_event event; + enum qede_rdma_event event; }; struct qedr_driver { @@ -62,32 +62,33 @@ struct qedr_driver { struct net_device *); void (*remove)(struct qedr_dev *); - void (*notify)(struct qedr_dev *, enum qede_roce_event); + void (*notify)(struct qedr_dev *, enum qede_rdma_event); }; -/* APIs for RoCE driver to register callback handlers, +/* APIs for RDMA driver to register callback handlers, * which will be invoked when device is added, removed, ifup, ifdown */ -int 
qede_roce_register_driver(struct qedr_driver *drv); -void qede_roce_unregister_driver(struct qedr_driver *drv); +int qede_rdma_register_driver(struct qedr_driver *drv); +void qede_rdma_unregister_driver(struct qedr_driver *drv); -bool qede_roce_supported(struct qede_dev *dev); +bool qede_rdma_supported(struct qede_dev *dev); #if IS_ENABLED(CONFIG_QED_RDMA) -int qede_roce_dev_add(struct qede_dev *dev); -void qede_roce_dev_event_open(struct qede_dev *dev); -void qede_roce_dev_event_close(struct qede_dev *dev); -void qede_roce_dev_remove(struct qede_dev *dev); -void qede_roce_event_changeaddr(struct qede_dev *qedr); +int qede_rdma_dev_add(struct qede_dev *dev); +void qede_rdma_dev_event_open(struct qede_dev *dev); +void qede_rdma_dev_event_close(struct qede_dev *dev); +void qede_rdma_dev_remove(struct qede_dev *dev); +void qede_rdma_event_changeaddr(struct qede_dev *edr); + #else -static inline int qede_roce_dev_add(struct qede_dev *dev) +static inline int qede_rdma_dev_add(struct qede_dev *dev); { return 0; } -static inline void qede_roce_dev_event_open(struct qede_dev *dev) {} -static inline void qede_roce_dev_event_close(struct qede_dev *dev) {} -static inline void qede_roce_dev_remove(struct qede_dev *dev) {} -static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {} +static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {} +static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {} +static inline void qede_rdma_dev_remove(struct qede_dev *dev) {} +static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {} #endif #endif -- cgit v1.2.3 From da2e9cf03b8fccbb69dd1e215bb1e554ce8e8cbe Mon Sep 17 00:00:00 2001 From: Chad Dupuis Date: Wed, 21 Jun 2017 08:26:34 +0300 Subject: qede: Fix compilation without QED_RDMA MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_QED_RDMA isn't defined, we'd hit the following: /include/linux/qed/qede_rdma.h:84:19: warning: ‘qede_rdma_dev_add’ used but never defined [enabled by default] static inline int qede_rdma_dev_add(struct qede_dev *dev); Fixes: bbfcd1e8e167 ("qed*: Set rdma generic functions prefix") Signed-off-by: Chad Dupuis Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- include/linux/qed/qede_rdma.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux/qed') diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 1348a16e5e4b..9904617a9730 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -81,7 +81,7 @@ void qede_rdma_dev_remove(struct qede_dev *dev); void qede_rdma_event_changeaddr(struct qede_dev *edr); #else -static inline int qede_rdma_dev_add(struct qede_dev *dev); +static inline int qede_rdma_dev_add(struct qede_dev *dev) { return 0; } -- cgit v1.2.3 From 7003cdd6a121e7bdb8a05eb1931f9549a36ea723 Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Wed, 21 Jun 2017 16:22:46 +0300 Subject: qed*: Rename qed_roce_if.h to qed_rdma_if.h Rename the qed_roce_if file to qed_rdma_if as it represents a common interface for RoCE and iWARP. This commit affects RDMA/qedr as well. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: David S.
Miller --- drivers/infiniband/hw/qedr/qedr.h | 2 +- drivers/infiniband/hw/qedr/qedr_cm.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 2 +- include/linux/qed/qed_rdma_if.h | 582 +++++++++++++++++++++++++++++ include/linux/qed/qed_roce_if.h | 582 ----------------------------- 7 files changed, 587 insertions(+), 587 deletions(-) create mode 100644 include/linux/qed/qed_rdma_if.h delete mode 100644 include/linux/qed/qed_roce_if.h (limited to 'include/linux/qed') diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 2376019a48ae..15365d5a0e19 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -36,7 +36,7 @@ #include #include #include -#include +#include #include #include #include "qedr_hsi_rdma.h" diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index eb3dce72fc21..4689e802b332 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -44,7 +44,7 @@ #include #include -#include +#include #include "qedr.h" #include "verbs.h" #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index cb620e008444..df76e212f86e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -53,7 +53,7 @@ #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" -#include +#include #include "qed_rdma.h" #include "qed_roce.h" #include "qed_sp.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 1836ff1810b4..d91e5c4069a6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -38,7 +38,7 @@ #include #include #include -#include +#include #include "qed.h" #include "qed_dev_api.h" #include "qed_hsi.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index a8426936f837..e53adc3d009b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -53,7 +53,7 @@ #include "qed_ll2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" -#include +#include #include "qed_rdma.h" #include "qed_roce.h" #include "qed_sp.h" diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h new file mode 100644 index 000000000000..ff9be01b5f53 --- /dev/null +++ b/include/linux/qed/qed_rdma_if.h @@ -0,0 +1,582 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _QED_RDMA_IF_H +#define _QED_RDMA_IF_H +#include +#include +#include +#include +#include +#include +#include + +enum qed_roce_ll2_tx_dest { + /* Light L2 TX Destination to the Network */ + QED_ROCE_LL2_TX_DEST_NW, + + /* Light L2 TX Destination to the Loopback */ + QED_ROCE_LL2_TX_DEST_LB, + QED_ROCE_LL2_TX_DEST_MAX +}; + +#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF) + +/* rdma interface */ + +enum qed_roce_qp_state { + QED_ROCE_QP_STATE_RESET, + QED_ROCE_QP_STATE_INIT, + QED_ROCE_QP_STATE_RTR, + QED_ROCE_QP_STATE_RTS, + QED_ROCE_QP_STATE_SQD, + QED_ROCE_QP_STATE_ERR, + QED_ROCE_QP_STATE_SQE +}; + +enum qed_rdma_tid_type { + QED_RDMA_TID_REGISTERED_MR, + QED_RDMA_TID_FMR, + QED_RDMA_TID_MW_TYPE1, + QED_RDMA_TID_MW_TYPE2A +}; + +struct qed_rdma_events { + void *context; + void (*affiliated_event)(void *context, u8 fw_event_code, + void *fw_handle); + void (*unaffiliated_event)(void *context, u8 event_code); +}; + +struct qed_rdma_device { + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + u64 fw_ver; + + u64 node_guid; + u64 sys_image_guid; + + u8 max_cnq; + u8 max_sge; + u8 max_srq_sge; + u16 max_inline; + u32 max_wqe; + u32 max_srq_wqe; + u8 max_qp_resp_rd_atomic_resc; + u8 max_qp_req_rd_atomic_resc; + u64 max_dev_resp_rd_atomic_resc; + u32 max_cq; + u32 max_qp; + u32 max_srq; + u32 max_mr; + u64 max_mr_size; + u32 max_cqe; + u32 max_mw; + u32 max_fmr; + u32 max_mr_mw_fmr_pbl; + u64 max_mr_mw_fmr_size; + u32 max_pd; + u32 max_ah; + u8 max_pkey; + u16 max_srq_wr; + u8 max_stats_queues; + u32 dev_caps; + + /* Abilty to support RNR-NAK generation */ + +#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1 +#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0 + /* Abilty to support shutdown port */ +#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1 +#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1 + /* Abilty to support port active event */ +#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1 +#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2 + /* Abilty to support port change event */ +#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1 +#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3 + /* Abilty to support system image GUID */ +#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1 +#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4 + /* Abilty to support bad P_Key counter support */ +#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1 +#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5 + /* Abilty to support atomic operations */ +#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1 +#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6 +#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1 +#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7 + /* Abilty to support modifying the maximum number of + * outstanding work requests per QP + */ +#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1 +#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8 + /* Abilty to support automatic path migration */ +#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1 +#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9 + /* Abilty to support the base memory management extensions */ +#define 
QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10 +#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11 + /* Abilty to support multipile page sizes per memory region */ +#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1 +#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12 + /* Abilty to support block list physical buffer list */ +#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1 +#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13 + /* Abilty to support zero based virtual addresses */ +#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1 +#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14 + /* Abilty to support local invalidate fencing */ +#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1 +#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15 + /* Abilty to support Loopback on QP */ +#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1 +#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16 + u64 page_size_caps; + u8 dev_ack_delay; + u32 reserved_lkey; + u32 bad_pkey_counter; + struct qed_rdma_events events; +}; + +enum qed_port_state { + QED_RDMA_PORT_UP, + QED_RDMA_PORT_DOWN, +}; + +enum qed_roce_capability { + QED_ROCE_V1 = 1 << 0, + QED_ROCE_V2 = 1 << 1, +}; + +struct qed_rdma_port { + enum qed_port_state port_state; + int link_speed; + u64 max_msg_size; + u8 source_gid_table_len; + void *source_gid_table_ptr; + u8 pkey_table_len; + void *pkey_table_ptr; + u32 pkey_bad_counter; + enum qed_roce_capability capability; +}; + +struct qed_rdma_cnq_params { + u8 num_pbl_pages; + u64 pbl_ptr; +}; + +/* The CQ Mode affects the CQ doorbell transaction size. + * 64/32 bit machines should configure to 32/16 bits respectively. + */ +enum qed_rdma_cq_mode { + QED_RDMA_CQ_MODE_16_BITS, + QED_RDMA_CQ_MODE_32_BITS, +}; + +struct qed_roce_dcqcn_params { + u8 notification_point; + u8 reaction_point; + + /* fields for notification point */ + u32 cnp_send_timeout; + + /* fields for reaction point */ + u32 rl_bc_rate; + u16 rl_max_rate; + u16 rl_r_ai; + u16 rl_r_hai; + u16 dcqcn_g; + u32 dcqcn_k_us; + u32 dcqcn_timeout_us; +}; + +struct qed_rdma_start_in_params { + struct qed_rdma_events *events; + struct qed_rdma_cnq_params cnq_pbl_list[128]; + u8 desired_cnq; + enum qed_rdma_cq_mode cq_mode; + struct qed_roce_dcqcn_params dcqcn_params; + u16 max_mtu; + u8 mac_addr[ETH_ALEN]; + u8 iwarp_flags; +}; + +struct qed_rdma_add_user_out_params { + u16 dpi; + u64 dpi_addr; + u64 dpi_phys_addr; + u32 dpi_size; + u16 wid_count; +}; + +enum roce_mode { + ROCE_V1, + ROCE_V2_IPV4, + ROCE_V2_IPV6, + MAX_ROCE_MODE +}; + +union qed_gid { + u8 bytes[16]; + u16 words[8]; + u32 dwords[4]; + u64 qwords[2]; + u32 ipv4_addr; +}; + +struct qed_rdma_register_tid_in_params { + u32 itid; + enum qed_rdma_tid_type tid_type; + u8 key; + u16 pd; + bool local_read; + bool local_write; + bool remote_read; + bool remote_write; + bool remote_atomic; + bool mw_bind; + u64 pbl_ptr; + bool pbl_two_level; + u8 pbl_page_size_log; + u8 page_size_log; + u32 fbo; + u64 length; + u64 vaddr; + bool zbva; + bool phy_mr; + bool dma_mr; + + bool dif_enabled; + u64 dif_error_addr; + u64 dif_runt_addr; +}; + +struct qed_rdma_create_cq_in_params { + u32 cq_handle_lo; + u32 cq_handle_hi; + u32 cq_size; + u16 dpi; + bool pbl_two_level; + u64 pbl_ptr; + u16 pbl_num_pages; + u8 pbl_page_size_log; + u8 cnq_id; + u16 int_timeout; +}; + +struct qed_rdma_create_srq_in_params { + u64 pbl_base_addr; + u64 prod_pair_addr; + u16 num_pages; + u16 pd_id; + u16 page_size; +}; + +struct qed_rdma_destroy_cq_in_params { + u16 
icid; +}; + +struct qed_rdma_destroy_cq_out_params { + u16 num_cq_notif; +}; + +struct qed_rdma_create_qp_in_params { + u32 qp_handle_lo; + u32 qp_handle_hi; + u32 qp_handle_async_lo; + u32 qp_handle_async_hi; + bool use_srq; + bool signal_all; + bool fmr_and_reserved_lkey; + u16 pd; + u16 dpi; + u16 sq_cq_id; + u16 sq_num_pages; + u64 sq_pbl_ptr; + u8 max_sq_sges; + u16 rq_cq_id; + u16 rq_num_pages; + u64 rq_pbl_ptr; + u16 srq_id; + u8 stats_queue; +}; + +struct qed_rdma_create_qp_out_params { + u32 qp_id; + u16 icid; + void *rq_pbl_virt; + dma_addr_t rq_pbl_phys; + void *sq_pbl_virt; + dma_addr_t sq_pbl_phys; +}; + +struct qed_rdma_modify_qp_in_params { + u32 modify_flags; +#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0 +#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1 +#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2 +#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3 +#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4 +#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5 +#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1 +#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8 +#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9 +#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10 +#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11 +#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12 +#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13 +#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1 +#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14 + + enum qed_roce_qp_state new_state; + u16 pkey; + bool incoming_rdma_read_en; + bool incoming_rdma_write_en; + bool incoming_atomic_en; + bool e2e_flow_control_en; + u32 dest_qp; + bool lb_indication; + u16 mtu; + u8 traffic_class_tos; + u8 hop_limit_ttl; + u32 flow_label; + union qed_gid sgid; + union qed_gid dgid; + u16 udp_src_port; + + u16 vlan_id; + + u32 rq_psn; + u32 sq_psn; + u8 max_rd_atomic_resp; + u8 max_rd_atomic_req; + u32 ack_timeout; + u8 retry_cnt; + u8 rnr_retry_cnt; + u8 min_rnr_nak_timer; + bool sqd_async; + u8 remote_mac_addr[6]; + u8 local_mac_addr[6]; + bool use_local_mac; + enum roce_mode roce_mode; +}; + +struct qed_rdma_query_qp_out_params { + enum qed_roce_qp_state state; + u32 rq_psn; + u32 sq_psn; + bool draining; + u16 mtu; + u32 dest_qp; + bool incoming_rdma_read_en; + bool incoming_rdma_write_en; + bool incoming_atomic_en; + bool e2e_flow_control_en; + union qed_gid sgid; + union qed_gid dgid; + u32 flow_label; + u8 hop_limit_ttl; + u8 traffic_class_tos; + u32 timeout; + u8 rnr_retry; + u8 retry_cnt; + u8 min_rnr_nak_timer; + u16 pkey_index; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + bool sqd_async; +}; + +struct qed_rdma_create_srq_out_params { + u16 srq_id; +}; + 
+struct qed_rdma_destroy_srq_in_params { + u16 srq_id; +}; + +struct qed_rdma_modify_srq_in_params { + u32 wqe_limit; + u16 srq_id; +}; + +struct qed_rdma_stats_out_params { + u64 sent_bytes; + u64 sent_pkts; + u64 rcv_bytes; + u64 rcv_pkts; +}; + +struct qed_rdma_counters_out_params { + u64 pd_count; + u64 max_pd; + u64 dpi_count; + u64 max_dpi; + u64 cq_count; + u64 max_cq; + u64 qp_count; + u64 max_qp; + u64 tid_count; + u64 max_tid; +}; + +#define QED_ROCE_TX_HEAD_FAILURE (1) +#define QED_ROCE_TX_FRAG_FAILURE (2) + +struct qed_roce_ll2_header { + void *vaddr; + dma_addr_t baddr; + size_t len; +}; + +struct qed_roce_ll2_buffer { + dma_addr_t baddr; + size_t len; +}; + +struct qed_roce_ll2_packet { + struct qed_roce_ll2_header header; + int n_seg; + struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE]; + int roce_mode; + enum qed_roce_ll2_tx_dest tx_dest; +}; + +enum qed_rdma_type { + QED_RDMA_TYPE_ROCE, +}; + +struct qed_dev_rdma_info { + struct qed_dev_info common; + enum qed_rdma_type rdma_type; + u8 user_dpm_enabled; +}; + +struct qed_rdma_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct qed_dev *cdev, + struct qed_dev_rdma_info *info); + void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev); + + int (*rdma_init)(struct qed_dev *dev, + struct qed_rdma_start_in_params *iparams); + + int (*rdma_add_user)(void *rdma_cxt, + struct qed_rdma_add_user_out_params *oparams); + + void (*rdma_remove_user)(void *rdma_cxt, u16 dpi); + int (*rdma_stop)(void *rdma_cxt); + struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt); + struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt); + int (*rdma_get_start_sb)(struct qed_dev *cdev); + int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev); + void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod); + int (*rdma_get_rdma_int)(struct qed_dev *cdev, + struct qed_int_info *info); + int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt); + int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd); + void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd); + int (*rdma_create_cq)(void *rdma_cxt, + struct qed_rdma_create_cq_in_params *params, + u16 *icid); + int (*rdma_destroy_cq)(void *rdma_cxt, + struct qed_rdma_destroy_cq_in_params *iparams, + struct qed_rdma_destroy_cq_out_params *oparams); + struct qed_rdma_qp * + (*rdma_create_qp)(void *rdma_cxt, + struct qed_rdma_create_qp_in_params *iparams, + struct qed_rdma_create_qp_out_params *oparams); + + int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp, + struct qed_rdma_modify_qp_in_params *iparams); + + int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *oparams); + int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp); + + int + (*rdma_register_tid)(void *rdma_cxt, + struct qed_rdma_register_tid_in_params *iparams); + + int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid); + int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); + void (*rdma_free_tid)(void *rdma_cxt, u32 itid); + + int (*ll2_acquire_connection)(void *rdma_cxt, + struct qed_ll2_acquire_data *data); + + int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle); + int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle); + void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle); + + int (*ll2_prepare_tx_packet)(void *rdma_cxt, + u8 connection_handle, + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw); + + int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt, + u8 connection_handle, + dma_addr_t addr, + u16 
nbytes); + int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle, + dma_addr_t addr, u16 buf_len, void *cookie, + u8 notify_fw); + int (*ll2_get_stats)(void *rdma_cxt, + u8 connection_handle, + struct qed_ll2_stats *p_stats); + int (*ll2_set_mac_filter)(struct qed_dev *cdev, + u8 *old_mac_address, u8 *new_mac_address); + +}; + +const struct qed_rdma_ops *qed_get_rdma_ops(void); + +#endif diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h deleted file mode 100644 index 8e70f5ee05af..000000000000 --- a/include/linux/qed/qed_roce_if.h +++ /dev/null @@ -1,582 +0,0 @@ -/* QLogic qed NIC Driver - * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ -#ifndef _QED_ROCE_IF_H -#define _QED_ROCE_IF_H -#include -#include -#include -#include -#include -#include -#include - -enum qed_roce_ll2_tx_dest { - /* Light L2 TX Destination to the Network */ - QED_ROCE_LL2_TX_DEST_NW, - - /* Light L2 TX Destination to the Loopback */ - QED_ROCE_LL2_TX_DEST_LB, - QED_ROCE_LL2_TX_DEST_MAX -}; - -#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF) - -/* rdma interface */ - -enum qed_roce_qp_state { - QED_ROCE_QP_STATE_RESET, - QED_ROCE_QP_STATE_INIT, - QED_ROCE_QP_STATE_RTR, - QED_ROCE_QP_STATE_RTS, - QED_ROCE_QP_STATE_SQD, - QED_ROCE_QP_STATE_ERR, - QED_ROCE_QP_STATE_SQE -}; - -enum qed_rdma_tid_type { - QED_RDMA_TID_REGISTERED_MR, - QED_RDMA_TID_FMR, - QED_RDMA_TID_MW_TYPE1, - QED_RDMA_TID_MW_TYPE2A -}; - -struct qed_rdma_events { - void *context; - void (*affiliated_event)(void *context, u8 fw_event_code, - void *fw_handle); - void (*unaffiliated_event)(void *context, u8 event_code); -}; - -struct qed_rdma_device { - u32 vendor_id; - u32 vendor_part_id; - u32 hw_ver; - u64 fw_ver; - - u64 node_guid; - u64 sys_image_guid; - - u8 max_cnq; - u8 max_sge; - u8 max_srq_sge; - u16 max_inline; - u32 max_wqe; - u32 max_srq_wqe; - u8 max_qp_resp_rd_atomic_resc; - u8 max_qp_req_rd_atomic_resc; - u64 max_dev_resp_rd_atomic_resc; - u32 max_cq; - u32 max_qp; - u32 max_srq; - u32 max_mr; - u64 max_mr_size; - u32 max_cqe; - u32 max_mw; - u32 max_fmr; - u32 max_mr_mw_fmr_pbl; - u64 max_mr_mw_fmr_size; - u32 max_pd; - u32 max_ah; - u8 max_pkey; - u16 max_srq_wr; - u8 max_stats_queues; - u32 dev_caps; - - /* Abilty to support RNR-NAK generation */ - -#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1 -#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0 - /* Abilty to support shutdown port */ -#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1 -#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1 - /* Abilty to support port active event */ -#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1 -#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2 - /* Abilty to support port change event */ -#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1 -#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3 - /* Abilty to support system image GUID */ -#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1 -#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4 - /* Abilty to support bad P_Key counter support */ -#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1 -#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5 - /* Abilty to support atomic operations */ -#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1 -#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6 -#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1 -#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7 - /* Abilty to support modifying the maximum number of - * outstanding work requests per QP - */ -#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1 -#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8 - /* Abilty to support automatic path migration */ -#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1 -#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9 - /* Abilty to support the base memory management extensions */ -#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1 -#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10 -#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1 -#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11 - /* Abilty to support multipile page sizes per memory region */ -#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1 -#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12 - /* Abilty to support block list physical buffer list */ -#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1 -#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13 - /* 
Abilty to support zero based virtual addresses */ -#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1 -#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14 - /* Abilty to support local invalidate fencing */ -#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1 -#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15 - /* Abilty to support Loopback on QP */ -#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1 -#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16 - u64 page_size_caps; - u8 dev_ack_delay; - u32 reserved_lkey; - u32 bad_pkey_counter; - struct qed_rdma_events events; -}; - -enum qed_port_state { - QED_RDMA_PORT_UP, - QED_RDMA_PORT_DOWN, -}; - -enum qed_roce_capability { - QED_ROCE_V1 = 1 << 0, - QED_ROCE_V2 = 1 << 1, -}; - -struct qed_rdma_port { - enum qed_port_state port_state; - int link_speed; - u64 max_msg_size; - u8 source_gid_table_len; - void *source_gid_table_ptr; - u8 pkey_table_len; - void *pkey_table_ptr; - u32 pkey_bad_counter; - enum qed_roce_capability capability; -}; - -struct qed_rdma_cnq_params { - u8 num_pbl_pages; - u64 pbl_ptr; -}; - -/* The CQ Mode affects the CQ doorbell transaction size. - * 64/32 bit machines should configure to 32/16 bits respectively. - */ -enum qed_rdma_cq_mode { - QED_RDMA_CQ_MODE_16_BITS, - QED_RDMA_CQ_MODE_32_BITS, -}; - -struct qed_roce_dcqcn_params { - u8 notification_point; - u8 reaction_point; - - /* fields for notification point */ - u32 cnp_send_timeout; - - /* fields for reaction point */ - u32 rl_bc_rate; - u16 rl_max_rate; - u16 rl_r_ai; - u16 rl_r_hai; - u16 dcqcn_g; - u32 dcqcn_k_us; - u32 dcqcn_timeout_us; -}; - -struct qed_rdma_start_in_params { - struct qed_rdma_events *events; - struct qed_rdma_cnq_params cnq_pbl_list[128]; - u8 desired_cnq; - enum qed_rdma_cq_mode cq_mode; - struct qed_roce_dcqcn_params dcqcn_params; - u16 max_mtu; - u8 mac_addr[ETH_ALEN]; - u8 iwarp_flags; -}; - -struct qed_rdma_add_user_out_params { - u16 dpi; - u64 dpi_addr; - u64 dpi_phys_addr; - u32 dpi_size; - u16 wid_count; -}; - -enum roce_mode { - ROCE_V1, - ROCE_V2_IPV4, - ROCE_V2_IPV6, - MAX_ROCE_MODE -}; - -union qed_gid { - u8 bytes[16]; - u16 words[8]; - u32 dwords[4]; - u64 qwords[2]; - u32 ipv4_addr; -}; - -struct qed_rdma_register_tid_in_params { - u32 itid; - enum qed_rdma_tid_type tid_type; - u8 key; - u16 pd; - bool local_read; - bool local_write; - bool remote_read; - bool remote_write; - bool remote_atomic; - bool mw_bind; - u64 pbl_ptr; - bool pbl_two_level; - u8 pbl_page_size_log; - u8 page_size_log; - u32 fbo; - u64 length; - u64 vaddr; - bool zbva; - bool phy_mr; - bool dma_mr; - - bool dif_enabled; - u64 dif_error_addr; - u64 dif_runt_addr; -}; - -struct qed_rdma_create_cq_in_params { - u32 cq_handle_lo; - u32 cq_handle_hi; - u32 cq_size; - u16 dpi; - bool pbl_two_level; - u64 pbl_ptr; - u16 pbl_num_pages; - u8 pbl_page_size_log; - u8 cnq_id; - u16 int_timeout; -}; - -struct qed_rdma_create_srq_in_params { - u64 pbl_base_addr; - u64 prod_pair_addr; - u16 num_pages; - u16 pd_id; - u16 page_size; -}; - -struct qed_rdma_destroy_cq_in_params { - u16 icid; -}; - -struct qed_rdma_destroy_cq_out_params { - u16 num_cq_notif; -}; - -struct qed_rdma_create_qp_in_params { - u32 qp_handle_lo; - u32 qp_handle_hi; - u32 qp_handle_async_lo; - u32 qp_handle_async_hi; - bool use_srq; - bool signal_all; - bool fmr_and_reserved_lkey; - u16 pd; - u16 dpi; - u16 sq_cq_id; - u16 sq_num_pages; - u64 sq_pbl_ptr; - u8 max_sq_sges; - u16 rq_cq_id; - u16 rq_num_pages; - u64 rq_pbl_ptr; - u16 srq_id; - u8 stats_queue; -}; - -struct qed_rdma_create_qp_out_params { - u32 qp_id; - u16 icid; - void 
*rq_pbl_virt; - dma_addr_t rq_pbl_phys; - void *sq_pbl_virt; - dma_addr_t sq_pbl_phys; -}; - -struct qed_rdma_modify_qp_in_params { - u32 modify_flags; -#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1 -#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0 -#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1 -#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1 -#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2 -#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3 -#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4 -#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5 -#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6 -#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1 -#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7 -#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1 -#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8 -#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9 -#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10 -#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11 -#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12 -#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13 -#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1 -#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14 - - enum qed_roce_qp_state new_state; - u16 pkey; - bool incoming_rdma_read_en; - bool incoming_rdma_write_en; - bool incoming_atomic_en; - bool e2e_flow_control_en; - u32 dest_qp; - bool lb_indication; - u16 mtu; - u8 traffic_class_tos; - u8 hop_limit_ttl; - u32 flow_label; - union qed_gid sgid; - union qed_gid dgid; - u16 udp_src_port; - - u16 vlan_id; - - u32 rq_psn; - u32 sq_psn; - u8 max_rd_atomic_resp; - u8 max_rd_atomic_req; - u32 ack_timeout; - u8 retry_cnt; - u8 rnr_retry_cnt; - u8 min_rnr_nak_timer; - bool sqd_async; - u8 remote_mac_addr[6]; - u8 local_mac_addr[6]; - bool use_local_mac; - enum roce_mode roce_mode; -}; - -struct qed_rdma_query_qp_out_params { - enum qed_roce_qp_state state; - u32 rq_psn; - u32 sq_psn; - bool draining; - u16 mtu; - u32 dest_qp; - bool incoming_rdma_read_en; - bool incoming_rdma_write_en; - bool incoming_atomic_en; - bool e2e_flow_control_en; - union qed_gid sgid; - union qed_gid dgid; - u32 flow_label; - u8 hop_limit_ttl; - u8 traffic_class_tos; - u32 timeout; - u8 rnr_retry; - u8 retry_cnt; - u8 min_rnr_nak_timer; - u16 pkey_index; - u8 max_rd_atomic; - u8 max_dest_rd_atomic; - bool sqd_async; -}; - -struct qed_rdma_create_srq_out_params { - u16 srq_id; -}; - -struct qed_rdma_destroy_srq_in_params { - u16 srq_id; -}; - -struct qed_rdma_modify_srq_in_params { - u32 wqe_limit; - u16 srq_id; -}; - -struct qed_rdma_stats_out_params { - u64 sent_bytes; - u64 sent_pkts; - u64 rcv_bytes; - u64 rcv_pkts; -}; - -struct qed_rdma_counters_out_params { - u64 pd_count; - u64 max_pd; - u64 dpi_count; - u64 max_dpi; - u64 cq_count; - u64 max_cq; - u64 qp_count; - u64 max_qp; - u64 tid_count; - u64 max_tid; -}; - -#define QED_ROCE_TX_HEAD_FAILURE (1) -#define QED_ROCE_TX_FRAG_FAILURE (2) - 
-struct qed_roce_ll2_header { - void *vaddr; - dma_addr_t baddr; - size_t len; -}; - -struct qed_roce_ll2_buffer { - dma_addr_t baddr; - size_t len; -}; - -struct qed_roce_ll2_packet { - struct qed_roce_ll2_header header; - int n_seg; - struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE]; - int roce_mode; - enum qed_roce_ll2_tx_dest tx_dest; -}; - -enum qed_rdma_type { - QED_RDMA_TYPE_ROCE, -}; - -struct qed_dev_rdma_info { - struct qed_dev_info common; - enum qed_rdma_type rdma_type; - u8 user_dpm_enabled; -}; - -struct qed_rdma_ops { - const struct qed_common_ops *common; - - int (*fill_dev_info)(struct qed_dev *cdev, - struct qed_dev_rdma_info *info); - void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev); - - int (*rdma_init)(struct qed_dev *dev, - struct qed_rdma_start_in_params *iparams); - - int (*rdma_add_user)(void *rdma_cxt, - struct qed_rdma_add_user_out_params *oparams); - - void (*rdma_remove_user)(void *rdma_cxt, u16 dpi); - int (*rdma_stop)(void *rdma_cxt); - struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt); - struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt); - int (*rdma_get_start_sb)(struct qed_dev *cdev); - int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev); - void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod); - int (*rdma_get_rdma_int)(struct qed_dev *cdev, - struct qed_int_info *info); - int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt); - int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd); - void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd); - int (*rdma_create_cq)(void *rdma_cxt, - struct qed_rdma_create_cq_in_params *params, - u16 *icid); - int (*rdma_destroy_cq)(void *rdma_cxt, - struct qed_rdma_destroy_cq_in_params *iparams, - struct qed_rdma_destroy_cq_out_params *oparams); - struct qed_rdma_qp * - (*rdma_create_qp)(void *rdma_cxt, - struct qed_rdma_create_qp_in_params *iparams, - struct qed_rdma_create_qp_out_params *oparams); - - int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp, - struct qed_rdma_modify_qp_in_params *iparams); - - int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp, - struct qed_rdma_query_qp_out_params *oparams); - int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp); - - int - (*rdma_register_tid)(void *rdma_cxt, - struct qed_rdma_register_tid_in_params *iparams); - - int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid); - int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid); - void (*rdma_free_tid)(void *rdma_cxt, u32 itid); - - int (*ll2_acquire_connection)(void *rdma_cxt, - struct qed_ll2_acquire_data *data); - - int (*ll2_establish_connection)(void *rdma_cxt, u8 connection_handle); - int (*ll2_terminate_connection)(void *rdma_cxt, u8 connection_handle); - void (*ll2_release_connection)(void *rdma_cxt, u8 connection_handle); - - int (*ll2_prepare_tx_packet)(void *rdma_cxt, - u8 connection_handle, - struct qed_ll2_tx_pkt_info *pkt, - bool notify_fw); - - int (*ll2_set_fragment_of_tx_packet)(void *rdma_cxt, - u8 connection_handle, - dma_addr_t addr, - u16 nbytes); - int (*ll2_post_rx_buffer)(void *rdma_cxt, u8 connection_handle, - dma_addr_t addr, u16 buf_len, void *cookie, - u8 notify_fw); - int (*ll2_get_stats)(void *rdma_cxt, - u8 connection_handle, - struct qed_ll2_stats *p_stats); - int (*ll2_set_mac_filter)(struct qed_dev *cdev, - u8 *old_mac_address, u8 *new_mac_address); - -}; - -const struct qed_rdma_ops *qed_get_rdma_ops(void); - -#endif -- cgit v1.2.3 From c851a9dc4359c6b19722de568e9f543c1c23481c Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Sun, 2 Jul 
2017 10:29:21 +0300 Subject: qed: Introduce iWARP personality iWARP personality introduced the need for differentiating in several places in the code whether we are RoCE, iWARP or either. This leads to introducing new macros for querying the personality. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 26 +++++++++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 8 ++++---- drivers/net/ethernet/qlogic/qed/qed_dev.c | 12 +++++------- drivers/net/ethernet/qlogic/qed/qed_l2.c | 3 +-- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 17 ++++++++--------- include/linux/qed/common_hsi.h | 2 +- 7 files changed, 43 insertions(+), 27 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 14b08ee9e3ad..22e1171c317e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -210,14 +210,16 @@ struct qed_tunn_update_params { /* The PCI personality is not quite synonymous to protocol ID: * 1. All personalities need CORE connections - * 2. The Ethernet personality may support also the RoCE protocol + * 2. The Ethernet personality may support also the RoCE/iWARP protocol */ enum qed_pci_personality { QED_PCI_ETH, QED_PCI_FCOE, QED_PCI_ISCSI, QED_PCI_ETH_ROCE, - QED_PCI_DEFAULT /* default in shmem */ + QED_PCI_ETH_IWARP, + QED_PCI_ETH_RDMA, + QED_PCI_DEFAULT, /* default in shmem */ }; /* All VFs are symmetric, all counters are PF + all VFs */ @@ -277,6 +279,7 @@ enum qed_dev_cap { QED_DEV_CAP_FCOE, QED_DEV_CAP_ISCSI, QED_DEV_CAP_ROCE, + QED_DEV_CAP_IWARP, }; enum qed_wol_support { @@ -286,7 +289,24 @@ enum qed_wol_support { struct qed_hw_info { /* PCI personality */ - enum qed_pci_personality personality; + enum qed_pci_personality personality; +#define QED_IS_RDMA_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \ + (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \ + (dev)->hw_info.personality == QED_PCI_ETH_RDMA) +#define QED_IS_ROCE_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \ + (dev)->hw_info.personality == QED_PCI_ETH_RDMA) +#define QED_IS_IWARP_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \ + (dev)->hw_info.personality == QED_PCI_ETH_RDMA) +#define QED_IS_L2_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH || \ + QED_IS_RDMA_PERSONALITY(dev)) +#define QED_IS_FCOE_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_FCOE) +#define QED_IS_ISCSI_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ISCSI) /* Resource Allocation scheme results */ u32 resc_start[QED_MAX_RESC]; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index e201214764db..38716f77c21d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -853,7 +853,7 @@ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines) if (!excess_lines) return 0; - if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_RDMA_PERSONALITY(p_hwfn)) return 0; p_mngr = p_hwfn->p_cxt_mngr; @@ -1033,7 +1033,7 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, u32 lines, line, sz_left, lines_to_skip = 0; /* Special handling for RoCE that supports dynamic allocation */ - if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) && + if 
(QED_IS_RDMA_PERSONALITY(p_hwfn) && ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM)) return 0; @@ -1833,7 +1833,7 @@ static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) tm_offset += tm_iids.pf_tids[i]; } - if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) active_seg_mask = 0; STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask); @@ -2344,7 +2344,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, last_cid_allocated - 1); if (!p_hwfn->b_rdma_enabled_in_prs) { - /* Enable RoCE search */ + /* Enable RDMA search */ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); p_hwfn->b_rdma_enabled_in_prs = true; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 49667ad9042d..68e61823bfc0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -936,7 +936,7 @@ int qed_resc_alloc(struct qed_dev *cdev) /* EQ */ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); - if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { num_cons = qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ROCE, NULL) * 2; @@ -2057,7 +2057,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) qed_int_get_num_sbs(p_hwfn, &sb_cnt); if (IS_ENABLED(CONFIG_QED_RDMA) && - p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + QED_IS_RDMA_PERSONALITY(p_hwfn)) { /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide * the status blocks equally between L2 / RoCE but with * consideration as to how many l2 queues / cnqs we have. @@ -2068,9 +2068,7 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) non_l2_sbs = feat_num[QED_RDMA_CNQ]; } - - if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE || - p_hwfn->hw_info.personality == QED_PCI_ETH) { + if (QED_IS_L2_PERSONALITY(p_hwfn)) { /* Start by allocating VF queues, then PF's */ feat_num[QED_VF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_L2_QUEUE), @@ -2083,12 +2081,12 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) QED_VF_L2_QUE)); } - if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + if (QED_IS_FCOE_PERSONALITY(p_hwfn)) feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); - if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) + if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, RESC_NUM(p_hwfn, QED_CMDQS_CQS)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e57699bfbdfa..27ea54ba7e1b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -79,8 +79,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn) unsigned long **pp_qids; u32 i; - if (p_hwfn->hw_info.personality != QED_PCI_ETH && - p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_L2_PERSONALITY(p_hwfn)) return 0; p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 17f9b0a7b553..be66f19d577d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1421,7 +1421,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) if (rc) goto out; - if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) + if (!QED_IS_RDMA_PERSONALITY(p_hwfn)) qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1); qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 16cc30b11cce..b11399606990 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -237,6 +237,8 @@ err0: int qed_fill_dev_info(struct qed_dev *cdev, struct qed_dev_info *dev_info) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_hw_info *hw_info = &p_hwfn->hw_info; struct qed_tunnel_info *tun = &cdev->tunnel; struct qed_ptt *ptt; @@ -260,11 +262,10 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; - dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality == - QED_PCI_ETH_ROCE); + dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); dev_info->dev_type = cdev->type; - ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); + ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); if (IS_PF(cdev)) { dev_info->fw_major = FW_MAJOR_VERSION; @@ -274,8 +275,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->mf_mode = cdev->mf_mode; dev_info->tx_switching = true; - if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support == - QED_WOL_SUPPORT_PME) + if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) dev_info->wol_support = true; dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; @@ -304,7 +304,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, &dev_info->mfw_rev, NULL); } - dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu; + dev_info->mtu = hw_info->mtu; return 0; } @@ -790,7 +790,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, cdev->num_hwfns; if (!IS_ENABLED(CONFIG_QED_RDMA) || - QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE) + !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) return 0; for_each_hwfn(cdev, i) @@ -931,8 +931,7 @@ static void qed_update_pf_params(struct qed_dev *cdev, /* In case we might support RDMA, don't allow qede to be greedy * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn. */ - if (QED_LEADING_HWFN(cdev)->hw_info.personality == - QED_PCI_ETH_ROCE) { + if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { u16 *num_cons; num_cons = ¶ms->eth_pf_params.num_cons; diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index a567cbf8c5b4..885ae1379b5a 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -778,7 +778,7 @@ enum protocol_type { PROTOCOLID_ROCE, PROTOCOLID_CORE, PROTOCOLID_ETH, - PROTOCOLID_RESERVED4, + PROTOCOLID_IWARP, PROTOCOLID_RESERVED5, PROTOCOLID_PREROCE, PROTOCOLID_COMMON, -- cgit v1.2.3 From 67b40dccc45ff5d488aad17114e80e00029fd854 Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Sun, 2 Jul 2017 10:29:22 +0300 Subject: qed: Implement iWARP initialization, teardown and qp operations This patch adds iWARP support for flows that have common code between RoCE and iWARP, such as initialization, teardown and qp setup verbs: create, destroy, modify, query. It introduces the iWARP specific files qed_iwarp.[ch] and iwarp_common.h Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 2 +- drivers/net/ethernet/qlogic/qed/qed_dev.c | 9 +- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1 + drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 531 ++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 85 +++++ drivers/net/ethernet/qlogic/qed/qed_rdma.c | 133 +++++-- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 3 + drivers/net/ethernet/qlogic/qed/qed_roce.c | 20 ++ drivers/net/ethernet/qlogic/qed/qed_sp.h | 5 +- include/linux/qed/iwarp_common.h | 53 +++ include/linux/qed/qed_rdma_if.h | 1 + 11 files changed, 803 insertions(+), 40 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_iwarp.c create mode 100644 drivers/net/ethernet/qlogic/qed/qed_iwarp.h create mode 100644 include/linux/qed/iwarp_common.h (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 67452380b60e..82dd47068e18 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -5,6 +5,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o -qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o +qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o qed-$(CONFIG_QED_FCOE) += qed_fcoe.o diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 68e61823bfc0..6c8505dc5c31 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -937,8 +937,15 @@ int qed_resc_alloc(struct qed_dev *cdev) /* EQ */ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + enum protocol_type rdma_proto; + + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) + rdma_proto = PROTOCOLID_ROCE; + else + rdma_proto = PROTOCOLID_IWARP; + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, - PROTOCOLID_ROCE, + rdma_proto, NULL) * 2; n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 3bf3614b3084..31fb0bffa098 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c new file mode 100644 index 000000000000..a8bd5f8edec6 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -0,0 +1,531 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "qed_cxt.h" +#include "qed_hw.h" +#include "qed_rdma.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" + +#define QED_IWARP_ORD_DEFAULT 32 +#define QED_IWARP_IRD_DEFAULT 32 +#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024) +#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024) +#define QED_IWARP_TS_EN BIT(0) +#define QED_IWARP_PARAM_CRC_NEEDED (1) +#define QED_IWARP_PARAM_P2P (1) + +static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, u16 echo, + union event_ring_data *data, + u8 fw_return_code); + +/* Override devinfo with iWARP specific values */ +void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + + dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE; + dev->max_qp = min_t(u32, + IWARP_MAX_QPS, + p_hwfn->p_rdma_info->num_qps); + + dev->max_cq = dev->max_qp; + + dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT; + dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT; +} + +void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP; + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); + p_hwfn->b_rdma_enabled_in_prs = true; +} + +static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) +{ + cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) +{ + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) { + DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n"); + return rc; + } + *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid); + if (rc) + qed_iwarp_cid_cleaned(p_hwfn, *cid); + + return rc; +} + +int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_create_qp_out_params *out_params) +{ + struct iwarp_create_qp_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u16 physical_queue; + u32 cid; + int rc; + + qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + &qp->shared_queue_phys_addr, + GFP_KERNEL); + if (!qp->shared_queue) + return -ENOMEM; + + out_params->sq_pbl_virt = (u8 *)qp->shared_queue + + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; + out_params->sq_pbl_phys = qp->shared_queue_phys_addr + + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; + out_params->rq_pbl_virt = (u8 *)qp->shared_queue + + 
IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; + out_params->rq_pbl_phys = qp->shared_queue_phys_addr + + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; + + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + goto err1; + + qp->icid = (u16)cid; + + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.cid = qp->icid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_CREATE_QP, + PROTOCOLID_IWARP, &init_data); + if (rc) + goto err2; + + p_ramrod = &p_ent->ramrod.iwarp_create_qp; + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN, + qp->fmr_and_reserved_lkey); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, + qp->incoming_rdma_read_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, + qp->incoming_rdma_write_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN, + qp->incoming_atomic_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq); + + p_ramrod->pd = qp->pd; + p_ramrod->sq_num_pages = qp->sq_num_pages; + p_ramrod->rq_num_pages = qp->rq_num_pages; + + p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); + p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); + + p_ramrod->cq_cid_for_sq = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); + p_ramrod->cq_cid_for_rq = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); + + p_ramrod->dpi = cpu_to_le16(qp->dpi); + + physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_ramrod->physical_q0 = cpu_to_le16(physical_queue); + physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); + p_ramrod->physical_q1 = cpu_to_le16(physical_queue); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err2; + + return rc; + +err2: + qed_iwarp_cid_cleaned(p_hwfn, cid); +err1: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + qp->shared_queue, qp->shared_queue_phys_addr); + + return rc; +} + +static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + struct iwarp_modify_qp_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MODIFY_QP, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iwarp_modify_qp; + SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, + 0x1); + if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) + p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING; + else + p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc); + + return rc; +} + +enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state) +{ + switch (state) { + case QED_ROCE_QP_STATE_RESET: + case QED_ROCE_QP_STATE_INIT: + case QED_ROCE_QP_STATE_RTR: + return QED_IWARP_QP_STATE_IDLE; + case QED_ROCE_QP_STATE_RTS: + return QED_IWARP_QP_STATE_RTS; + case QED_ROCE_QP_STATE_SQD: + return QED_IWARP_QP_STATE_CLOSING; + case 
QED_ROCE_QP_STATE_ERR: + return QED_IWARP_QP_STATE_ERROR; + case QED_ROCE_QP_STATE_SQE: + return QED_IWARP_QP_STATE_TERMINATE; + default: + return QED_IWARP_QP_STATE_ERROR; + } +} + +static enum qed_roce_qp_state +qed_iwarp2roce_state(enum qed_iwarp_qp_state state) +{ + switch (state) { + case QED_IWARP_QP_STATE_IDLE: + return QED_ROCE_QP_STATE_INIT; + case QED_IWARP_QP_STATE_RTS: + return QED_ROCE_QP_STATE_RTS; + case QED_IWARP_QP_STATE_TERMINATE: + return QED_ROCE_QP_STATE_SQE; + case QED_IWARP_QP_STATE_CLOSING: + return QED_ROCE_QP_STATE_SQD; + case QED_IWARP_QP_STATE_ERROR: + return QED_ROCE_QP_STATE_ERR; + default: + return QED_ROCE_QP_STATE_ERR; + } +} + +const char *iwarp_state_names[] = { + "IDLE", + "RTS", + "TERMINATE", + "CLOSING", + "ERROR", +}; + +int +qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + enum qed_iwarp_qp_state new_state, bool internal) +{ + enum qed_iwarp_qp_state prev_iw_state; + bool modify_fw = false; + int rc = 0; + + /* modify QP can be called from upper-layer or as a result of async + * RST/FIN... therefore need to protect + */ + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + prev_iw_state = qp->iwarp_state; + + if (prev_iw_state == new_state) { + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + return 0; + } + + switch (prev_iw_state) { + case QED_IWARP_QP_STATE_IDLE: + switch (new_state) { + case QED_IWARP_QP_STATE_RTS: + qp->iwarp_state = QED_IWARP_QP_STATE_RTS; + break; + case QED_IWARP_QP_STATE_ERROR: + qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; + if (!internal) + modify_fw = true; + break; + default: + break; + } + break; + case QED_IWARP_QP_STATE_RTS: + switch (new_state) { + case QED_IWARP_QP_STATE_CLOSING: + if (!internal) + modify_fw = true; + + qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING; + break; + case QED_IWARP_QP_STATE_ERROR: + if (!internal) + modify_fw = true; + qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; + break; + default: + break; + } + break; + case QED_IWARP_QP_STATE_ERROR: + switch (new_state) { + case QED_IWARP_QP_STATE_IDLE: + + qp->iwarp_state = new_state; + break; + case QED_IWARP_QP_STATE_CLOSING: + /* could happen due to race... do nothing.... */ + break; + default: + rc = -EINVAL; + } + break; + case QED_IWARP_QP_STATE_TERMINATE: + case QED_IWARP_QP_STATE_CLOSING: + qp->iwarp_state = new_state; + break; + default: + break; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n", + qp->icid, + iwarp_state_names[prev_iw_state], + iwarp_state_names[qp->iwarp_state], + internal ? 
"internal" : ""); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + + if (modify_fw) + rc = qed_iwarp_modify_fw(p_hwfn, qp); + + return rc; +} + +int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_DESTROY_QP, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc); + + return rc; +} + +int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + int rc = 0; + + if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) { + rc = qed_iwarp_modify_qp(p_hwfn, qp, + QED_IWARP_QP_STATE_ERROR, false); + if (rc) + return rc; + } + + rc = qed_iwarp_fw_destroy(p_hwfn, qp); + + if (qp->shared_queue) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + qp->shared_queue, qp->shared_queue_phys_addr); + + return rc; +} + +#define QED_IWARP_MAX_CID_CLEAN_TIME 100 +#define QED_IWARP_MAX_NO_PROGRESS_CNT 5 + +/* This function waits for all the bits of a bmap to be cleared, as long as + * there is progress ( i.e. the number of bits left to be cleared decreases ) + * the function continues. + */ +static int +qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap) +{ + int prev_weight = 0; + int wait_count = 0; + int weight = 0; + + weight = bitmap_weight(bmap->bitmap, bmap->max_count); + prev_weight = weight; + + while (weight) { + msleep(QED_IWARP_MAX_CID_CLEAN_TIME); + + weight = bitmap_weight(bmap->bitmap, bmap->max_count); + + if (prev_weight == weight) { + wait_count++; + } else { + prev_weight = weight; + wait_count = 0; + } + + if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) { + DP_NOTICE(p_hwfn, + "%s bitmap wait timed out (%d cids pending)\n", + bmap->name, weight); + return -EBUSY; + } + } + return 0; +} + +static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn) +{ + /* Now wait for all cids to be completed */ + return qed_iwarp_wait_cid_map_cleared(p_hwfn, + &p_hwfn->p_rdma_info->cid_map); +} + +int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) +{ + spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + return 0; +} + +void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) +{ +} + +int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params) +{ + struct qed_iwarp_info *iwarp_info; + u32 rcv_wnd_size; + int rc = 0; + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + + iwarp_info->tcp_flags = QED_IWARP_TS_EN; + rcv_wnd_size = QED_IWARP_RCV_WND_SIZE_DEF; + + /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ + iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - + ilog2(QED_IWARP_RCV_WND_SIZE_MIN); + iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; + iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; + + iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; + + spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); + + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, + qed_iwarp_async_event); + + return rc; +} + +int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + int rc; + + rc = qed_iwarp_wait_for_all_cids(p_hwfn); + if (rc) + return rc; + + qed_spq_unregister_async_cb(p_hwfn, 
PROTOCOLID_IWARP); + + return 0; +} + +static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, u16 echo, + union event_ring_data *data, + u8 fw_return_code) +{ + return 0; +} + +void +qed_iwarp_query_qp(struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) +{ + out_params->state = qed_iwarp2roce_state(qp->iwarp_state); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h new file mode 100644 index 000000000000..05e5e45be6cf --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -0,0 +1,85 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and /or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef _QED_IWARP_H +#define _QED_IWARP_H + +enum qed_iwarp_qp_state { + QED_IWARP_QP_STATE_IDLE, + QED_IWARP_QP_STATE_RTS, + QED_IWARP_QP_STATE_TERMINATE, + QED_IWARP_QP_STATE_CLOSING, + QED_IWARP_QP_STATE_ERROR, +}; + +enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); + +struct qed_iwarp_info { + spinlock_t iw_lock; /* for iwarp resources */ + spinlock_t qp_lock; /* for teardown races */ + u32 rcv_wnd_scale; + u16 max_mtu; + u8 mac_addr[ETH_ALEN]; + u8 crc_needed; + u8 tcp_flags; + u8 peer2peer; + enum mpa_negotiation_mode mpa_rev; + enum mpa_rtr_type rtr_type; +}; + +int qed_iwarp_alloc(struct qed_hwfn *p_hwfn); + +int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params); + +int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn); + +void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn); + +void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_create_qp_out_params *out_params); + +int qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, + enum qed_iwarp_qp_state new_state, bool internal); + +int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); + +int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); + +void qed_iwarp_query_qp(struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index df76e212f86e..ee6887f8c260 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -161,7 +161,10 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, NULL); - p_rdma_info->num_qps = num_cons / 2; + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + p_rdma_info->num_qps = num_cons; + else + p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); @@ -252,6 +255,13 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, "Failed to allocate real cid bitmap, rc = %d\n", rc); goto free_cid_map; } + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + rc = qed_iwarp_alloc(p_hwfn); + + if (rc) + goto free_cid_map; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); return 0; @@ -329,6 +339,9 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) { struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_resc_free(p_hwfn); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); @@ -470,6 +483,9 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_init_devinfo(p_hwfn); } static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) @@ -490,29 +506,17 @@ static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 ll2_ethertype_en; + int rc = 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); p_hwfn->b_rdma_enabled_in_prs = false; - qed_wr(p_hwfn, p_ptt, 
PRS_REG_ROCE_DEST_QP_MAX_PF, 0); - - p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; - - /* We delay writing to this reg until first cid is allocated. See - * qed_cxt_dynamic_ilt_alloc function for more details - */ - ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); - qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, - (ll2_ethertype_en | 0x01)); - - if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { - DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); - return -EINVAL; - } + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_init_hw(p_hwfn, p_ptt); + else + rc = qed_roce_init_hw(p_hwfn, p_ptt); - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); - return 0; + return rc; } static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, @@ -544,7 +548,10 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, if (rc) return rc; - p_ramrod = &p_ent->ramrod.roce_init_func.rdma; + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; + else + p_ramrod = &p_ent->ramrod.roce_init_func.rdma; p_params_header = &p_ramrod->params_header; p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, @@ -641,7 +648,15 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn, if (rc) return rc; - qed_roce_setup(p_hwfn); + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_setup(p_hwfn, p_ptt, params); + if (rc) + return rc; + } else { + rc = qed_roce_setup(p_hwfn); + if (rc) + return rc; + } return qed_rdma_start_fw(p_hwfn, params, p_ptt); } @@ -675,7 +690,16 @@ int qed_rdma_stop(void *rdma_cxt) qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, (ll2_ethertype_en & 0xFFFE)); - qed_roce_stop(p_hwfn); + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_stop(p_hwfn, p_ptt); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + } else { + qed_roce_stop(p_hwfn); + } + qed_ptt_release(p_hwfn, p_ptt); /* Get SPQ entry */ @@ -810,7 +834,9 @@ static int qed_fill_rdma_dev_info(struct qed_dev *cdev, memset(info, 0, sizeof(*info)); - info->rdma_type = QED_RDMA_TYPE_ROCE; + info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ? 
+ QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP; + info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); qed_fill_dev_info(cdev, &info->common); @@ -1112,7 +1138,7 @@ static int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_query_qp_out_params *out_params) { struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; - int rc; + int rc = 0; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); @@ -1138,7 +1164,10 @@ static int qed_rdma_query_qp(void *rdma_cxt, out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; out_params->sqd_async = qp->sqd_async; - rc = qed_roce_query_qp(p_hwfn, qp, out_params); + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_query_qp(qp, out_params); + else + rc = qed_roce_query_qp(p_hwfn, qp, out_params); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); return rc; @@ -1151,7 +1180,10 @@ static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); - rc = qed_roce_destroy_qp(p_hwfn, qp); + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + rc = qed_iwarp_destroy_qp(p_hwfn, qp); + else + rc = qed_roce_destroy_qp(p_hwfn, qp); /* free qp params struct */ kfree(qp); @@ -1190,20 +1222,27 @@ qed_rdma_create_qp(void *rdma_cxt, return NULL; } + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + if (in_params->sq_num_pages * sizeof(struct regpair) > + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) { + DP_NOTICE(p_hwfn->cdev, + "Sq num pages: %d exceeds maximum\n", + in_params->sq_num_pages); + return NULL; + } + if (in_params->rq_num_pages * sizeof(struct regpair) > + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) { + DP_NOTICE(p_hwfn->cdev, + "Rq num pages: %d exceeds maximum\n", + in_params->rq_num_pages); + return NULL; + } + } + qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return NULL; - rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); - qp->qpid = ((0xFF << 16) | qp->icid); - - DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid); - - if (rc) { - kfree(qp); - return NULL; - } - qp->cur_state = QED_ROCE_QP_STATE_RESET; qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); @@ -1226,6 +1265,19 @@ qed_rdma_create_qp(void *rdma_cxt, qp->e2e_flow_control_en = qp->use_srq ? 
false : true; qp->stats_queue = in_params->stats_queue; + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_create_qp(p_hwfn, qp, out_params); + qp->qpid = qp->icid; + } else { + rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); + qp->qpid = ((0xFF << 16) | qp->icid); + } + + if (rc) { + kfree(qp); + return NULL; + } + out_params->icid = qp->icid; out_params->qp_id = qp->qpid; @@ -1324,7 +1376,14 @@ static int qed_rdma_modify_qp(void *rdma_cxt, qp->cur_state); } - rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + enum qed_iwarp_qp_state new_state = + qed_roce2iwarp_state(qp->cur_state); + + rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0); + } else { + rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); + } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index d91e5c4069a6..90e4e0f13727 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -42,6 +42,7 @@ #include "qed.h" #include "qed_dev_api.h" #include "qed_hsi.h" +#include "qed_iwarp.h" #include "qed_roce.h" #define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS) @@ -97,6 +98,7 @@ struct qed_rdma_info { u16 queue_zone_base; u16 max_queue_zones; enum protocol_type proto; + struct qed_iwarp_info iwarp; }; struct qed_rdma_qp { @@ -105,6 +107,7 @@ struct qed_rdma_qp { u32 qpid; u16 icid; enum qed_roce_qp_state cur_state; + enum qed_iwarp_qp_state iwarp_state; bool use_srq; bool signal_all; bool fmr_and_reserved_lkey; diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index e53adc3d009b..fb7c2d1562ae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1149,3 +1149,23 @@ int qed_roce_setup(struct qed_hwfn *p_hwfn) qed_roce_async_event); } +int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 ll2_ethertype_en; + + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); + + p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; + + ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); + qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, + (ll2_ethertype_en | 0x01)); + + if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { + DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 56c95fb9a26d..c3752c56e54d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -104,12 +104,15 @@ union ramrod_data { struct roce_query_qp_req_ramrod_data roce_query_qp_req; struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; + struct roce_init_func_ramrod_data roce_init_func; struct rdma_create_cq_ramrod_data rdma_create_cq; struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; struct rdma_srq_create_ramrod_data rdma_create_srq; struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_modify_ramrod_data rdma_modify_srq; - struct roce_init_func_ramrod_data roce_init_func; + struct iwarp_create_qp_ramrod_data iwarp_create_qp; + struct iwarp_modify_qp_ramrod_data iwarp_modify_qp; + struct iwarp_init_func_ramrod_data iwarp_init_func; struct 
fcoe_init_ramrod_params fcoe_init;
 	struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
 	struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;

diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h
new file mode 100644
index 000000000000..b8b3e1cfae90
--- /dev/null
+++ b/include/linux/qed/iwarp_common.h
@@ -0,0 +1,53 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and /or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __IWARP_COMMON__
+#define __IWARP_COMMON__
+#include <linux/qed/rdma_common.h>
+/************************/
+/* IWARP FW CONSTANTS */
+/************************/
+
+#define IWARP_ACTIVE_MODE 0
+#define IWARP_PASSIVE_MODE 1
+
+#define IWARP_SHARED_QUEUE_PAGE_SIZE		(0x8000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET	(0x4000)
+#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE	(0x1000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET	(0x5000)
+#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE	(0x3000)
+
+#define IWARP_REQ_MAX_INLINE_DATA_SIZE		(128)
+#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE	(176)
+
+#define IWARP_MAX_QPS				(64 * 1024)
+
+#endif /* __IWARP_COMMON__ */

diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index ff9be01b5f53..5b4bb09a3354 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -491,6 +491,7 @@ struct qed_roce_ll2_packet {

 enum qed_rdma_type {
 	QED_RDMA_TYPE_ROCE,
+	QED_RDMA_TYPE_IWARP
 };

 struct qed_dev_rdma_info {
-- cgit v1.2.3
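The iwarp_common.h constants above pin down how each QP's 32 KB shared-queue page is carved up: qed_iwarp_create_qp() places the RQ PBL at offset 0x4000 (at most 0x1000 bytes) and the SQ PBL at offset 0x5000 (at most 0x3000 bytes, the remainder of the page). A minimal compile-time sketch of those invariants follows; it is illustrative only, not part of the patch series, and both helper names are hypothetical:

#include <linux/bug.h>
#include <linux/types.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/iwarp_common.h>

/* The RQ PBL region must end exactly where the SQ PBL region starts,
 * and the SQ PBL region must end exactly at the end of the page.
 */
static inline void iwarp_shared_queue_layout_check(void)
{
	BUILD_BUG_ON(IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET +
		     IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE !=
		     IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET);
	BUILD_BUG_ON(IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET +
		     IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE !=
		     IWARP_SHARED_QUEUE_PAGE_SIZE);
}

/* Mirrors the bound qed_rdma_create_qp() enforces above: each PBL entry
 * is a struct regpair (8 bytes), so an SQ may reference at most
 * 0x3000 / 8 = 1536 pages and an RQ at most 0x1000 / 8 = 512 pages.
 */
static inline bool iwarp_pbl_fits(u32 num_pages, u32 pbl_max_size)
{
	return num_pages * sizeof(struct regpair) <= pbl_max_size;
}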
From 526d1d05e456c9cfc077694d18b5f521e2338f18 Mon Sep 17 00:00:00 2001
From: "Kalderon, Michal"
Date: Sun, 2 Jul 2017 10:29:23 +0300
Subject: qed: Rename some ll2 related defines

Make some names more generic as they will be used by iWARP too.

Signed-off-by: Michal Kalderon
Signed-off-by: Yuval Mintz
Signed-off-by: Ariel Elior
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/qlogic/qed/qed.h     |  2 +-
 drivers/net/ethernet/qlogic/qed/qed_ll2.c | 29 ++++++++++++++---------------
 include/linux/qed/qed_ll2_if.h            |  2 +-
 3 files changed, 16 insertions(+), 17 deletions(-)

(limited to 'include/linux/qed')

diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 22e1171c317e..fd8cd5e17121 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -779,7 +779,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
 }

 #define PURE_LB_TC 8
-#define OOO_LB_TC 9
+#define PKT_LB_TC 9

 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,

diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index be66f19d577d..e235fb267ab9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -309,7 +309,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 		list_del(&p_pkt->list_entry);
 		b_last_packet = list_empty(&p_tx->active_descq);
 		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
 			struct qed_ooo_buffer *p_buffer;

 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -532,7 +532,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)

 		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

-		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
 			struct qed_ooo_buffer *p_buffer;

 			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -893,8 +893,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
 	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
 	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
 	p_ramrod->queue_id = p_ll2_conn->queue_id;
-	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
-	    : 1;
+	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ?
0 : 1; if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) { @@ -924,7 +923,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) return 0; - if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) p_ll2_conn->tx_stats_en = 0; else p_ll2_conn->tx_stats_en = 1; @@ -955,10 +954,10 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->pbl_size = cpu_to_le16(pbl_size); switch (p_ll2_conn->input.tx_tc) { - case LB_TC: + case PURE_LB_TC: pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); break; - case OOO_LB_TC: + case PKT_LB_TC: pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); break; default: @@ -973,7 +972,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->conn_type = PROTOCOLID_FCOE; break; case QED_LL2_TYPE_ISCSI: - case QED_LL2_TYPE_ISCSI_OOO: + case QED_LL2_TYPE_OOO: p_ramrod->conn_type = PROTOCOLID_ISCSI; break; case QED_LL2_TYPE_ROCE: @@ -1142,7 +1141,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, u16 buf_idx; int rc = 0; - if (p_ll2_info->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO) return rc; /* Correct number of requested OOO buffers if needed */ @@ -1280,7 +1279,7 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) goto q_allocate_fail; /* Register callbacks for the Rx/Tx queues */ - if (data->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) { + if (data->input.conn_type == QED_LL2_TYPE_OOO) { comp_rx_cb = qed_ll2_lb_rxq_completion; comp_tx_cb = qed_ll2_lb_txq_completion; } else { @@ -1339,7 +1338,7 @@ static void qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { - if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) return; qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); @@ -1794,7 +1793,7 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) qed_ll2_rxq_flush(p_hwfn, connection_handle); } - if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { @@ -1816,7 +1815,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, { struct qed_ooo_buffer *p_buffer; - if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_ISCSI_OOO) + if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) return; qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); @@ -2063,7 +2062,7 @@ static void qed_ll2_set_conn_data(struct qed_dev *cdev, ll2_cbs.cookie = QED_LEADING_HWFN(cdev); if (lb) { - data->input.tx_tc = OOO_LB_TC; + data->input.tx_tc = PKT_LB_TC; data->input.tx_dest = QED_LL2_TX_DEST_LB; } else { data->input.tx_tc = 0; @@ -2080,7 +2079,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev, int rc; qed_ll2_set_conn_data(cdev, &data, params, - QED_LL2_TYPE_ISCSI_OOO, handle, true); + QED_LL2_TYPE_OOO, handle, true); rc = qed_ll2_acquire_connection(hwfn, &data); if (rc) { diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 5958b45eb699..c9c56bc42a82 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -47,7 +47,7 @@ enum qed_ll2_conn_type { QED_LL2_TYPE_FCOE, QED_LL2_TYPE_ISCSI, QED_LL2_TYPE_TEST, - QED_LL2_TYPE_ISCSI_OOO, + QED_LL2_TYPE_OOO, 
QED_LL2_TYPE_RESERVED2, QED_LL2_TYPE_ROCE, QED_LL2_TYPE_RESERVED3, -- cgit v1.2.3 From cc4ad324e7e247bb4979791dd4f2ff11419d9742 Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Sun, 2 Jul 2017 10:29:24 +0300 Subject: qed: Add iWARP support in ll2 connections Add a new connection type for iWARP ll2 connections for setting correct ll2 filters and connection type to FW. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 13 +++++++++++-- include/linux/qed/qed_ll2_if.h | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index e235fb267ab9..c06ad4f0758e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -896,7 +896,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1; if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && - p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) { + p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) && + (conn_type != QED_LL2_TYPE_IWARP)) { p_ramrod->mf_si_bcast_accept_all = 1; p_ramrod->mf_si_mcast_accept_all = 1; } else { @@ -972,12 +973,20 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->conn_type = PROTOCOLID_FCOE; break; case QED_LL2_TYPE_ISCSI: - case QED_LL2_TYPE_OOO: p_ramrod->conn_type = PROTOCOLID_ISCSI; break; case QED_LL2_TYPE_ROCE: p_ramrod->conn_type = PROTOCOLID_ROCE; break; + case QED_LL2_TYPE_IWARP: + p_ramrod->conn_type = PROTOCOLID_IWARP; + break; + case QED_LL2_TYPE_OOO: + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) + p_ramrod->conn_type = PROTOCOLID_ISCSI; + else + p_ramrod->conn_type = PROTOCOLID_IWARP; + break; default: p_ramrod->conn_type = PROTOCOLID_ETH; DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index c9c56bc42a82..dd7a3b86bb9e 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -50,6 +50,7 @@ enum qed_ll2_conn_type { QED_LL2_TYPE_OOO, QED_LL2_TYPE_RESERVED2, QED_LL2_TYPE_ROCE, + QED_LL2_TYPE_IWARP, QED_LL2_TYPE_RESERVED3, MAX_QED_LL2_RX_CONN_TYPE }; -- cgit v1.2.3 From 65a91a6cdb868a28b919ca133c0f9d9dfd9a635a Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Sun, 2 Jul 2017 10:29:26 +0300 Subject: qed: iWARP CM add listener functions and initial SYN processing This patch adds the ability to add and remove listeners and identify whether the SYN packet received is intended for iWARP or not. If a listener is not found the SYN packet is posted back to the chip. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 269 +++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 23 +++ drivers/net/ethernet/qlogic/qed/qed_rdma.c | 2 + include/linux/qed/qed_rdma_if.h | 52 ++++++ 4 files changed, 343 insertions(+), 3 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index f3b4b32100f5..2bab57c6bae8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -31,6 +31,10 @@ */ #include #include +#include +#include +#include +#include #include "qed_cxt.h" #include "qed_hw.h" #include "qed_ll2.h" @@ -477,6 +481,31 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) { } +static void +qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, + struct qed_iwarp_cm_info *cm_info) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n", + cm_info->ip_version); + + if (cm_info->ip_version == QED_TCP_IPV4) + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n", + cm_info->remote_ip, cm_info->remote_port, + cm_info->local_ip, cm_info->local_port, + cm_info->vlan); + else + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "remote_ip %pI6h:%x, local_ip %pI6h:%x vlan=%x\n", + cm_info->remote_ip, cm_info->remote_port, + cm_info->local_ip, cm_info->local_port, + cm_info->vlan); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "private_data_len = %x ord = %d, ird = %d\n", + cm_info->private_data_len, cm_info->ord, cm_info->ird); +} + static int qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, struct qed_iwarp_ll2_buff *buf, u8 handle) @@ -497,11 +526,147 @@ qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, return rc; } +static struct qed_iwarp_listener * +qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, + struct qed_iwarp_cm_info *cm_info) +{ + struct qed_iwarp_listener *listener = NULL; + static const u32 ip_zero[4] = { 0, 0, 0, 0 }; + bool found = false; + + qed_iwarp_print_cm_info(p_hwfn, cm_info); + + list_for_each_entry(listener, + &p_hwfn->p_rdma_info->iwarp.listen_list, + list_entry) { + if (listener->port == cm_info->local_port) { + if (!memcmp(listener->ip_addr, + ip_zero, sizeof(ip_zero))) { + found = true; + break; + } + + if (!memcmp(listener->ip_addr, + cm_info->local_ip, + sizeof(cm_info->local_ip)) && + (listener->vlan == cm_info->vlan)) { + found = true; + break; + } + } + } + + if (found) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n", + listener); + return listener; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n"); + return NULL; +} + +static int +qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_cm_info *cm_info, + void *buf, + u8 *remote_mac_addr, + u8 *local_mac_addr, + int *payload_len, int *tcp_start_offset) +{ + struct vlan_ethhdr *vethh; + bool vlan_valid = false; + struct ipv6hdr *ip6h; + struct ethhdr *ethh; + struct tcphdr *tcph; + struct iphdr *iph; + int eth_hlen; + int ip_hlen; + int eth_type; + int i; + + ethh = buf; + eth_type = ntohs(ethh->h_proto); + if (eth_type == ETH_P_8021Q) { + vlan_valid = true; + vethh = (struct vlan_ethhdr *)ethh; + cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK; + eth_type = ntohs(vethh->h_vlan_encapsulated_proto); + } + + eth_hlen = ETH_HLEN + (vlan_valid ? 
sizeof(u32) : 0); + + memcpy(remote_mac_addr, ethh->h_source, ETH_ALEN); + + memcpy(local_mac_addr, ethh->h_dest, ETH_ALEN); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n", + eth_type, ethh->h_source); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n", + eth_hlen, ethh->h_dest); + + iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); + + if (eth_type == ETH_P_IP) { + cm_info->local_ip[0] = ntohl(iph->daddr); + cm_info->remote_ip[0] = ntohl(iph->saddr); + cm_info->ip_version = TCP_IPV4; + + ip_hlen = (iph->ihl) * sizeof(u32); + *payload_len = ntohs(iph->tot_len) - ip_hlen; + } else if (eth_type == ETH_P_IPV6) { + ip6h = (struct ipv6hdr *)iph; + for (i = 0; i < 4; i++) { + cm_info->local_ip[i] = + ntohl(ip6h->daddr.in6_u.u6_addr32[i]); + cm_info->remote_ip[i] = + ntohl(ip6h->saddr.in6_u.u6_addr32[i]); + } + cm_info->ip_version = TCP_IPV6; + + ip_hlen = sizeof(*ip6h); + *payload_len = ntohs(ip6h->payload_len); + } else { + DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type); + return -EINVAL; + } + + tcph = (struct tcphdr *)((u8 *)iph + ip_hlen); + + if (!tcph->syn) { + DP_NOTICE(p_hwfn, + "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n", + iph->ihl, tcph->source, tcph->dest); + return -EINVAL; + } + + cm_info->local_port = ntohs(tcph->dest); + cm_info->remote_port = ntohs(tcph->source); + + qed_iwarp_print_cm_info(p_hwfn, cm_info); + + *tcp_start_offset = eth_hlen + ip_hlen; + + return 0; +} + static void qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) { struct qed_iwarp_ll2_buff *buf = data->cookie; + struct qed_iwarp_listener *listener; + struct qed_ll2_tx_pkt_info tx_pkt; + struct qed_iwarp_cm_info cm_info; struct qed_hwfn *p_hwfn = cxt; + u8 remote_mac_addr[ETH_ALEN]; + u8 local_mac_addr[ETH_ALEN]; + int tcp_start_offset; + u8 ll2_syn_handle; + int payload_len; + int rc; + + memset(&cm_info, 0, sizeof(cm_info)); if (GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) && @@ -510,11 +675,52 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) goto err; } - /* Process SYN packet - added later on in series */ + rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) + + data->u.placement_offset, remote_mac_addr, + local_mac_addr, &payload_len, + &tcp_start_offset); + if (rc) + goto err; + + /* Check if there is a listener for this 4-tuple+vlan */ + ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; + listener = qed_iwarp_get_listener(p_hwfn, &cm_info); + if (!listener) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "SYN received on tuple not listened on parse_flags=%d packet len=%d\n", + data->parse_flags, data->length.packet_length); + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.vlan = data->vlan; + + if (GET_FIELD(data->parse_flags, + PARSING_AND_ERR_FLAGS_TAG8021QEXIST)) + SET_FIELD(tx_pkt.bd_flags, + CORE_TX_BD_DATA_VLAN_INSERTION, 1); + + tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2; + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + tx_pkt.first_frag = buf->data_phys_addr + + data->u.placement_offset; + tx_pkt.first_frag_len = data->length.packet_length; + tx_pkt.cookie = buf; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle, + &tx_pkt, true); + + if (rc) { + DP_NOTICE(p_hwfn, + "Can't post SYN back to chip rc=%d\n", rc); + goto err; + } + return; + } + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n"); err: - qed_iwarp_ll2_post_rx(p_hwfn, buf, - 
p_hwfn->p_rdma_info->iwarp.ll2_syn_handle); + qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle); } static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle, @@ -700,6 +906,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list); qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, qed_iwarp_async_event); @@ -728,6 +935,62 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, return 0; } +int +qed_iwarp_create_listen(void *rdma_cxt, + struct qed_iwarp_listen_in *iparams, + struct qed_iwarp_listen_out *oparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_listener *listener; + + listener = kzalloc(sizeof(*listener), GFP_KERNEL); + if (!listener) + return -ENOMEM; + + listener->ip_version = iparams->ip_version; + memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr)); + listener->port = iparams->port; + listener->vlan = iparams->vlan; + + listener->event_cb = iparams->event_cb; + listener->cb_context = iparams->cb_context; + listener->max_backlog = iparams->max_backlog; + oparams->handle = listener; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&listener->list_entry, + &p_hwfn->p_rdma_info->iwarp.listen_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n", + listener->event_cb, + listener, + listener->ip_addr[0], + listener->ip_addr[1], + listener->ip_addr[2], + listener->ip_addr[3], listener->port, listener->vlan); + + return 0; +} + +int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle) +{ + struct qed_iwarp_listener *listener = handle; + struct qed_hwfn *p_hwfn = rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle); + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&listener->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + kfree(listener); + + return 0; +} + void qed_iwarp_query_qp(struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *out_params) diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 068b8594f1c5..29005ac83a6a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -54,6 +54,7 @@ struct qed_iwarp_ll2_buff { }; struct qed_iwarp_info { + struct list_head listen_list; /* qed_iwarp_listener */ spinlock_t iw_lock; /* for iwarp resources */ spinlock_t qp_lock; /* for teardown races */ u32 rcv_wnd_scale; @@ -67,6 +68,21 @@ struct qed_iwarp_info { enum mpa_rtr_type rtr_type; }; +struct qed_iwarp_listener { + struct list_head list_entry; + + /* The event_cb function is called for connection requests. + * The cb_context is passed to the event_cb function. 
+	 */
+	iwarp_event_handler event_cb;
+	void *cb_context;
+	u32 max_backlog;
+	u32 ip_addr[4];
+	u16 port;
+	u16 vlan;
+	u8 ip_version;
+};
+
 int qed_iwarp_alloc(struct qed_hwfn *p_hwfn);

 int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
@@ -94,4 +110,11 @@ int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
 void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
 			struct qed_rdma_query_qp_out_params *out_params);

+int
+qed_iwarp_create_listen(void *rdma_cxt,
+			struct qed_iwarp_listen_in *iparams,
+			struct qed_iwarp_listen_out *oparams);
+
+int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle);
+
 #endif

diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index ee6887f8c260..29de915007a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1772,6 +1772,8 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
 	.ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
 	.ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
 	.ll2_get_stats = &qed_ll2_get_stats,
+	.iwarp_create_listen = &qed_iwarp_create_listen,
+	.iwarp_destroy_listen = &qed_iwarp_destroy_listen,
 };

 const struct qed_rdma_ops *qed_get_rdma_ops(void)

diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index 5b4bb09a3354..28df5688ad0c 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -470,6 +470,52 @@ struct qed_rdma_counters_out_params {
 #define QED_ROCE_TX_HEAD_FAILURE (1)
 #define QED_ROCE_TX_FRAG_FAILURE (2)

+enum qed_iwarp_event_type {
+	QED_IWARP_EVENT_MPA_REQUEST,	/* Passive side request received */
+};
+
+enum qed_tcp_ip_version {
+	QED_TCP_IPV4,
+	QED_TCP_IPV6,
+};
+
+struct qed_iwarp_cm_info {
+	enum qed_tcp_ip_version ip_version;
+	u32 remote_ip[4];
+	u32 local_ip[4];
+	u16 remote_port;
+	u16 local_port;
+	u16 vlan;
+	u8 ord;
+	u8 ird;
+	u16 private_data_len;
+	const void *private_data;
+};
+
+struct qed_iwarp_cm_event_params {
+	enum qed_iwarp_event_type event;
+	const struct qed_iwarp_cm_info *cm_info;
+	void *ep_context;	/* To be passed to accept call */
+	int status;
+};
+
+typedef int (*iwarp_event_handler) (void *context,
+				    struct qed_iwarp_cm_event_params *event);
+
+struct qed_iwarp_listen_in {
+	iwarp_event_handler event_cb;
+	void *cb_context;	/* passed to event_cb */
+	u32 max_backlog;
+	enum qed_tcp_ip_version ip_version;
+	u32 ip_addr[4];
+	u16 port;
+	u16 vlan;
+};
+
+struct qed_iwarp_listen_out {
+	void *handle;
+};
+
 struct qed_roce_ll2_header {
 	void *vaddr;
 	dma_addr_t baddr;
@@ -576,6 +622,12 @@ struct qed_rdma_ops {
 	int (*ll2_set_mac_filter)(struct qed_dev *cdev,
 				  u8 *old_mac_address, u8 *new_mac_address);

+	int (*iwarp_create_listen)(void *rdma_cxt,
+				   struct qed_iwarp_listen_in *iparams,
+				   struct qed_iwarp_listen_out *oparams);
+
+	int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle);
+
 };

 const struct qed_rdma_ops *qed_get_rdma_ops(void);
-- cgit v1.2.3

From 456a584947d5b92d5e5a62cc68125ab5f150aa8c Mon Sep 17 00:00:00 2001
From: "Kalderon, Michal"
Date: Sun, 2 Jul 2017 10:29:27 +0300
Subject: qed: iWARP CM add passive side connect

This patch implements the passive side connect. It covers pre-allocating
resources, creating a connection element when a valid SYN packet is
received, notifying the upper layer, and implementing the accept and
reject calls. Error handling is not part of this patch.
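From the upper layer's perspective, the flow this patch enables looks roughly as follows: register a listener through the qed_rdma_ops added earlier in the series, then handle the QED_IWARP_EVENT_MPA_REQUEST upcall that qed_iwarp_mpa_received() delivers below. This is only a minimal sketch under stated assumptions: the qed types and ops come from qed_rdma_if.h as shown in this series, but struct my_cm_ctx and the my_* functions are hypothetical, and the accept call that ep_context is destined for is introduced only later in the series.

#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/qed/qed_rdma_if.h>

/* Hypothetical upper-layer bookkeeping; only the fields used below. */
struct my_cm_ctx {
	void *rdma_cxt;			/* qed rdma context from probe */
	const struct qed_rdma_ops *ops;	/* from qed_get_rdma_ops() */
	void *listen_handle;
};

/* Called back by qed for connection events on this listener. */
static int my_iwarp_event_cb(void *context,
			     struct qed_iwarp_cm_event_params *event)
{
	switch (event->event) {
	case QED_IWARP_EVENT_MPA_REQUEST:
		/* cm_info already has the MPA v2 header stripped from the
		 * private data; ep_context is what would be handed to the
		 * accept/reject entry points added later in the series.
		 */
		pr_info("MPA request: ord %u ird %u priv_len %u\n",
			event->cm_info->ord, event->cm_info->ird,
			event->cm_info->private_data_len);
		return 0;
	default:
		return -EINVAL;
	}
}

static int my_start_listening(struct my_cm_ctx *ctx, u16 port)
{
	struct qed_iwarp_listen_in iparams;
	struct qed_iwarp_listen_out oparams;
	int rc;

	memset(&iparams, 0, sizeof(iparams));
	memset(&oparams, 0, sizeof(oparams));

	iparams.event_cb = my_iwarp_event_cb;
	iparams.cb_context = ctx;
	iparams.max_backlog = 8;
	iparams.ip_version = QED_TCP_IPV4;
	/* ip_addr stays zeroed: qed_iwarp_get_listener() treats an
	 * all-zero address as a wildcard match on the local IP.
	 */
	iparams.port = port;

	rc = ctx->ops->iwarp_create_listen(ctx->rdma_cxt, &iparams, &oparams);
	if (!rc)
		ctx->listen_handle = oparams.handle;
	return rc;
}

On teardown the opaque handle is simply returned through iwarp_destroy_listen(); everything else above follows the structures in qed_rdma_if.h verbatim.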
Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 2 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 11 + drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 939 +++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 62 ++ drivers/net/ethernet/qlogic/qed/qed_l2.c | 13 - drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 + drivers/net/ethernet/qlogic/qed/qed_sp.h | 2 + include/linux/qed/common_hsi.h | 2 + include/linux/qed/qed_rdma_if.h | 26 +- 9 files changed, 1039 insertions(+), 20 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index fd8cd5e17121..91003bc6f00b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -789,6 +789,8 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); int qed_device_num_engines(struct qed_dev *cdev); int qed_device_get_port_id(struct qed_dev *cdev); +void qed_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 6c8505dc5c31..4060a6ad9be3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -4127,3 +4127,14 @@ int qed_device_get_port_id(struct qed_dev *cdev) { return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev); } + +void qed_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, __le16 *fw_lsb, u8 *mac) +{ + ((u8 *)fw_msb)[0] = mac[1]; + ((u8 *)fw_msb)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lsb)[0] = mac[5]; + ((u8 *)fw_lsb)[1] = mac[4]; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 2bab57c6bae8..a6dadae1f998 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -44,9 +44,30 @@ #define QED_IWARP_ORD_DEFAULT 32 #define QED_IWARP_IRD_DEFAULT 32 +#define QED_IWARP_MAX_FW_MSS 4120 + +#define QED_EP_SIG 0xecabcdef + +struct mpa_v2_hdr { + __be16 ird; + __be16 ord; +}; + +#define MPA_V2_PEER2PEER_MODEL 0x8000 +#define MPA_V2_SEND_RTR 0x4000 /* on ird */ +#define MPA_V2_READ_RTR 0x4000 /* on ord */ +#define MPA_V2_WRITE_RTR 0x8000 +#define MPA_V2_IRD_ORD_MASK 0x3FFF + +#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED) + +#define QED_IWARP_INVALID_TCP_CID 0xffffffff #define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024) #define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024) +#define TIMESTAMP_HEADER_SIZE (12) + #define QED_IWARP_TS_EN BIT(0) +#define QED_IWARP_DA_EN BIT(1) #define QED_IWARP_PARAM_CRC_NEEDED (1) #define QED_IWARP_PARAM_P2P (1) @@ -63,7 +84,8 @@ void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn) dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE; dev->max_qp = min_t(u32, IWARP_MAX_QPS, - p_hwfn->p_rdma_info->num_qps); + p_hwfn->p_rdma_info->num_qps) - + QED_IWARP_PREALLOC_CNT; dev->max_cq = dev->max_qp; @@ -78,12 +100,22 @@ void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->b_rdma_enabled_in_prs = true; } +/* We have two cid maps, one for tcp which should be used only from passive + * syn processing and replacing a pre-allocated ep in the list. The second + * for active tcp and for QPs. 
+ */ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) { cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); spin_lock_bh(&p_hwfn->p_rdma_info->lock); - qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + + if (cid < QED_IWARP_PREALLOC_CNT) + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, + cid); + else + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } @@ -107,6 +139,45 @@ static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) return rc; } +static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid) +{ + cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +/* This function allocates a cid for passive tcp (called from syn receive) + * the reason it's separate from the regular cid allocation is because it + * is assured that these cids already have ilt allocated. They are preallocated + * to ensure that we won't need to allocate memory during syn processing + */ +static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid) +{ + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->tcp_cid_map, cid); + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "can't allocate iwarp tcp cid max-count=%d\n", + p_hwfn->p_rdma_info->tcp_cid_map.max_count); + + *cid = QED_IWARP_INVALID_TCP_CID; + return rc; + } + + *cid += qed_cxt_get_proto_cid_start(p_hwfn, + p_hwfn->p_rdma_info->proto); + return 0; +} + int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, struct qed_rdma_create_qp_out_params *out_params) @@ -403,6 +474,26 @@ int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return rc; } +static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + bool remove_from_active_list) +{ + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*ep->ep_buffer_virt), + ep->ep_buffer_virt, ep->ep_buffer_phys); + + if (remove_from_active_list) { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + + if (ep->qp) + ep->qp->ep = NULL; + + kfree(ep); +} + int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { int rc = 0; @@ -424,6 +515,507 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return rc; } +static int +qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out) +{ + struct qed_iwarp_ep *ep; + int rc; + + ep = kzalloc(sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + ep->state = QED_IWARP_EP_INIT; + + ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*ep->ep_buffer_virt), + &ep->ep_buffer_phys, + GFP_KERNEL); + if (!ep->ep_buffer_virt) { + rc = -ENOMEM; + goto err; + } + + ep->sig = QED_EP_SIG; + + *ep_out = ep; + + return 0; + +err: + kfree(ep); + return rc; +} + +static void +qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n", + p_tcp_ramrod->tcp.local_mac_addr_lo, + p_tcp_ramrod->tcp.local_mac_addr_mid, + p_tcp_ramrod->tcp.local_mac_addr_hi, + 
p_tcp_ramrod->tcp.remote_mac_addr_lo, + p_tcp_ramrod->tcp.remote_mac_addr_mid, + p_tcp_ramrod->tcp.remote_mac_addr_hi); + + if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "local_ip=%pI4h:%x, remote_ip=%pI4h%x, vlan=%x\n", + p_tcp_ramrod->tcp.local_ip, + p_tcp_ramrod->tcp.local_port, + p_tcp_ramrod->tcp.remote_ip, + p_tcp_ramrod->tcp.remote_port, + p_tcp_ramrod->tcp.vlan_id); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n", + p_tcp_ramrod->tcp.local_ip, + p_tcp_ramrod->tcp.local_port, + p_tcp_ramrod->tcp.remote_ip, + p_tcp_ramrod->tcp.remote_port, + p_tcp_ramrod->tcp.vlan_id); + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n", + p_tcp_ramrod->tcp.flow_label, + p_tcp_ramrod->tcp.ttl, + p_tcp_ramrod->tcp.tos_or_tc, + p_tcp_ramrod->tcp.mss, + p_tcp_ramrod->tcp.rcv_wnd_scale, + p_tcp_ramrod->tcp.connect_mode, + p_tcp_ramrod->tcp.flags); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n", + p_tcp_ramrod->tcp.syn_ip_payload_length, + p_tcp_ramrod->tcp.syn_phy_addr_lo, + p_tcp_ramrod->tcp.syn_phy_addr_hi); +} + +static int +qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod; + struct tcp_offload_params_opt2 *tcp; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + dma_addr_t async_output_phys; + dma_addr_t in_pdata_phys; + u16 physical_q; + u8 tcp_flags; + int rc; + int i; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = ep->tcp_cid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD, + PROTOCOLID_IWARP, &init_data); + if (rc) + return rc; + + p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload; + + in_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, in_pdata); + DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr, + in_pdata_phys); + + p_tcp_ramrod->iwarp.incoming_ulp_buffer.len = + cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); + + async_output_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, async_output); + DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf, + async_output_phys); + + p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); + p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); + + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q); + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); + p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q); + p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev; + + tcp = &p_tcp_ramrod->tcp; + qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi, + &tcp->remote_mac_addr_mid, + &tcp->remote_mac_addr_lo, ep->remote_mac_addr); + qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid, + &tcp->local_mac_addr_lo, ep->local_mac_addr); + + tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan); + + tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags; + tcp->flags = 0; + SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, + !!(tcp_flags & QED_IWARP_TS_EN)); + + SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, + !!(tcp_flags & QED_IWARP_DA_EN)); + + tcp->ip_version = ep->cm_info.ip_version; + + for (i = 0; i < 4; 
i++) { + tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]); + tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]); + } + + tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port); + tcp->local_port = cpu_to_le16(ep->cm_info.local_port); + tcp->mss = cpu_to_le16(ep->mss); + tcp->flow_label = 0; + tcp->ttl = 0x40; + tcp->tos_or_tc = 0; + + tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; + tcp->connect_mode = ep->connect_mode; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + tcp->syn_ip_payload_length = + cpu_to_le16(ep->syn_ip_payload_length); + tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr); + tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr); + } + + qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc); + + return rc; +} + +static void +qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct qed_iwarp_cm_event_params params; + struct mpa_v2_hdr *mpa_v2; + union async_output *async_data; + u16 mpa_ord, mpa_ird; + u8 mpa_hdr_size = 0; + u8 mpa_rev; + + async_data = &ep->ep_buffer_virt->async_output; + + mpa_rev = async_data->mpa_request.mpa_handshake_mode; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "private_data_len=%x handshake_mode=%x private_data=(%x)\n", + async_data->mpa_request.ulp_data_len, + mpa_rev, *((u32 *)((u8 *)ep->ep_buffer_virt->in_pdata))); + + if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { + /* Read ord/ird values from private data buffer */ + mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata; + mpa_hdr_size = sizeof(*mpa_v2); + + mpa_ord = ntohs(mpa_v2->ord); + mpa_ird = ntohs(mpa_v2->ird); + + /* Temporarily store the requested incoming ord/ird in cm_info; + * later replaced with the negotiated values during accept + */ + ep->cm_info.ord = (u8)min_t(u16, + (mpa_ord & MPA_V2_IRD_ORD_MASK), + QED_IWARP_ORD_DEFAULT); + + ep->cm_info.ird = (u8)min_t(u16, + (mpa_ird & MPA_V2_IRD_ORD_MASK), + QED_IWARP_IRD_DEFAULT); + + /* Peer2Peer negotiation */ + ep->rtr_type = MPA_RTR_TYPE_NONE; + if (mpa_ird & MPA_V2_PEER2PEER_MODEL) { + if (mpa_ord & MPA_V2_WRITE_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE; + + if (mpa_ord & MPA_V2_READ_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ; + + if (mpa_ird & MPA_V2_SEND_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND; + + ep->rtr_type &= iwarp_info->rtr_type; + + /* if we're left with no match, send our capabilities */ + if (ep->rtr_type == MPA_RTR_TYPE_NONE) + ep->rtr_type = iwarp_info->rtr_type; + } + + ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; + } else { + ep->cm_info.ord = QED_IWARP_ORD_DEFAULT; + ep->cm_info.ird = QED_IWARP_IRD_DEFAULT; + ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n", + mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type, + async_data->mpa_request.ulp_data_len, mpa_hdr_size); + + /* Strip mpa v2 hdr from private data before sending to upper layer */ + ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size; + + ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len - + mpa_hdr_size; + + params.event = QED_IWARP_EVENT_MPA_REQUEST; + params.cm_info = &ep->cm_info; + params.ep_context = ep; + params.status = 0; + + ep->state = QED_IWARP_EP_MPA_REQ_RCVD; + ep->event_cb(ep->cb_context, &params); +} + +static int
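/* The Peer2Peer block above boils down to an intersection with a
 * fallback; a sketch with made-up operands (peer advertised WRITE|READ,
 * local policy SEND|WRITE):
 *
 *	peer_rtr  = MPA_RTR_TYPE_ZERO_WRITE | MPA_RTR_TYPE_ZERO_READ;
 *	local_rtr = MPA_RTR_TYPE_ZERO_SEND | MPA_RTR_TYPE_ZERO_WRITE;
 *	rtr = peer_rtr & local_rtr;	// == MPA_RTR_TYPE_ZERO_WRITE
 *	if (rtr == MPA_RTR_TYPE_NONE)	// no overlap at all:
 *		rtr = local_rtr;	// answer with our own capabilities
 */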
+qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; + struct qed_sp_init_data init_data; + dma_addr_t async_output_phys; + struct qed_spq_entry *p_ent; + dma_addr_t out_pdata_phys; + dma_addr_t in_pdata_phys; + struct qed_rdma_qp *qp; + bool reject; + int rc; + + if (!ep) + return -EINVAL; + + qp = ep->qp; + reject = !qp; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = reject ? ep->tcp_cid : qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, + PROTOCOLID_IWARP, &init_data); + if (rc) + return rc; + + p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload; + out_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, out_pdata); + DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr, + out_pdata_phys); + p_mpa_ramrod->common.outgoing_ulp_buffer.len = + ep->cm_info.private_data_len; + p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; + + p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord; + p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird; + + p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; + + in_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, in_pdata); + p_mpa_ramrod->tcp_connect_side = ep->connect_mode; + DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr, + in_pdata_phys); + p_mpa_ramrod->incoming_ulp_buffer.len = + cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); + async_output_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, async_output); + DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf, + async_output_phys); + p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); + p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); + + if (!reject) { + DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr, + qp->shared_queue_phys_addr); + p_mpa_ramrod->stats_counter_id = + RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; + } else { + p_mpa_ramrod->common.reject = 1; + } + + p_mpa_ramrod->mode = ep->mpa_rev; + SET_FIELD(p_mpa_ramrod->rtr_pref, + IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); + + ep->state = QED_IWARP_EP_MPA_OFFLOADED; + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (!reject) + ep->cid = qp->icid; /* Now they're migrated. */ + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n", + reject ? 0xffff : qp->icid, + ep->tcp_cid, + rc, + ep->cm_info.ird, + ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject); + return rc; +} + +static void +qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + ep->state = QED_IWARP_EP_INIT; + if (ep->qp) + ep->qp->ep = NULL; + ep->qp = NULL; + memset(&ep->cm_info, 0, sizeof(ep->cm_info)); + + if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { + /* We don't care about the return code, it's ok if tcp_cid + * remains invalid...in this case we'll defer allocation + */ + qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); + } + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + list_del(&ep->list_entry); + list_add_tail(&ep->list_entry, + &p_hwfn->p_rdma_info->iwarp.ep_free_list); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); +} + +#define QED_IWARP_CONNECT_MODE_STRING(ep) \ + ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? 
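/* The ramrod's tcp_cid above packs the PF's opaque fid and the connection
 * id into one 32-bit word; a layout sketch inferred from the expression,
 * not from an HSI comment:
 *
 *	 31           16 15            0
 *	+---------------+---------------+
 *	|   opaque_fid  |    tcp_cid    |
 *	+---------------+---------------+
 */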
"Passive" : "Active" + +/* Called as a result of the event: + * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE + */ +static void +qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", + ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); + + params.cm_info = &ep->cm_info; + + params.ep_context = ep; + + ep->state = QED_IWARP_EP_CLOSED; + + switch (fw_return_code) { + case RDMA_RETURN_OK: + ep->qp->max_rd_atomic_req = ep->cm_info.ord; + ep->qp->max_rd_atomic_resp = ep->cm_info.ird; + qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1); + ep->state = QED_IWARP_EP_ESTABLISHED; + params.status = 0; + break; + default: + params.status = -ECONNRESET; + break; + } + + ep->event_cb(ep->cb_context, ¶ms); +} + +static void +qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 *mpa_data_size) +{ + struct mpa_v2_hdr *mpa_v2_params; + u16 mpa_ird, mpa_ord; + + *mpa_data_size = 0; + if (MPA_REV2(ep->mpa_rev)) { + mpa_v2_params = + (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata; + *mpa_data_size = sizeof(*mpa_v2_params); + + mpa_ird = (u16)ep->cm_info.ird; + mpa_ord = (u16)ep->cm_info.ord; + + if (ep->rtr_type != MPA_RTR_TYPE_NONE) { + mpa_ird |= MPA_V2_PEER2PEER_MODEL; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND) + mpa_ird |= MPA_V2_SEND_RTR; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE) + mpa_ord |= MPA_V2_WRITE_RTR; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) + mpa_ord |= MPA_V2_READ_RTR; + } + + mpa_v2_params->ird = htons(mpa_ird); + mpa_v2_params->ord = htons(mpa_ord); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n", + mpa_v2_params->ird, + mpa_v2_params->ord, + *((u32 *)mpa_v2_params), + mpa_ord & MPA_V2_IRD_ORD_MASK, + mpa_ird & MPA_V2_IRD_ORD_MASK, + !!(mpa_ird & MPA_V2_PEER2PEER_MODEL), + !!(mpa_ird & MPA_V2_SEND_RTR), + !!(mpa_ord & MPA_V2_WRITE_RTR), + !!(mpa_ord & MPA_V2_READ_RTR)); + } +} + +static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_ep *ep = NULL; + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { + DP_ERR(p_hwfn, "Ep list is empty\n"); + goto out; + } + + ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, + struct qed_iwarp_ep, list_entry); + + /* in some cases we could have failed allocating a tcp cid when added + * from accept / failure... retry now..this is not the common case. 
+ */ + if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { + rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); + + /* if we fail we could look for another entry with a valid + * tcp_cid, but since we don't expect to reach this anyway + * it's not worth the handling + */ + if (rc) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + ep = NULL; + goto out; + } + } + + list_del(&ep->list_entry); + +out: + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + return ep; +} + #define QED_IWARP_MAX_CID_CLEAN_TIME 100 #define QED_IWARP_MAX_NO_PROGRESS_CNT 5 @@ -465,20 +1057,213 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap) static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn) { + int rc; + int i; + + rc = qed_iwarp_wait_cid_map_cleared(p_hwfn, + &p_hwfn->p_rdma_info->tcp_cid_map); + if (rc) + return rc; + + /* Now free the tcp cids from the main cid map */ + for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++) + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i); + /* Now wait for all cids to be completed */ return qed_iwarp_wait_cid_map_cleared(p_hwfn, &p_hwfn->p_rdma_info->cid_map); } +static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_ep *ep; + + while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, + struct qed_iwarp_ep, list_entry); + + if (!ep) { + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + break; + } + list_del(&ep->list_entry); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID) + qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid); + + qed_iwarp_destroy_ep(p_hwfn, ep, false); + } +} + +static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init) +{ + struct qed_iwarp_ep *ep; + int rc = 0; + int count; + u32 cid; + int i; + + count = init ? QED_IWARP_PREALLOC_CNT : 1; + for (i = 0; i < count; i++) { + rc = qed_iwarp_create_ep(p_hwfn, &ep); + if (rc) + return rc; + + /* During initialization we allocate from the main pool, + * afterwards we allocate only from the tcp_cid. + */ + if (init) { + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + goto err; + qed_iwarp_set_tcp_cid(p_hwfn, cid); + } else { + /* We don't care about the return code, it's ok if + * tcp_cid remains invalid...in this case we'll + * defer allocation + */ + qed_iwarp_alloc_tcp_cid(p_hwfn, &cid); + } + + ep->tcp_cid = cid; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, + &p_hwfn->p_rdma_info->iwarp.ep_free_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + + return rc; + +err: + qed_iwarp_destroy_ep(p_hwfn, ep, false); + + return rc; +} + int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) { + int rc; + + /* Allocate bitmap for tcp cid. 
These are used by passive side + * to ensure it can allocate a tcp cid during dpc that was + * pre-acquired and doesn't require dynamic allocation of ilt + */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, + QED_IWARP_PREALLOC_CNT, "TCP_CID"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate tcp cid, rc = %d\n", rc); + return rc; + } + + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list); spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock); - return 0; + return qed_iwarp_prealloc_ep(p_hwfn, true); } void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) { + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); +} + +int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + int rc; + + ep = (struct qed_iwarp_ep *)iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", + iparams->qp->icid, ep->tcp_cid); + + if ((iparams->ord > QED_IWARP_ORD_DEFAULT) || + (iparams->ird > QED_IWARP_IRD_DEFAULT)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", + iparams->qp->icid, + ep->tcp_cid, iparams->ord, iparams->ord); + return -EINVAL; + } + + qed_iwarp_prealloc_ep(p_hwfn, false); + + ep->cb_context = iparams->cb_context; + ep->qp = iparams->qp; + ep->qp->ep = ep; + + if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { + /* Negotiate ord/ird: if upperlayer requested ord larger than + * ird advertised by remote, we need to decrease our ord + */ + if (iparams->ord > ep->cm_info.ird) + iparams->ord = ep->cm_info.ird; + + if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && + (iparams->ird == 0)) + iparams->ird = 1; + } + + /* Update cm_info ord/ird to be negotiated values */ + ep->cm_info.ord = iparams->ord; + ep->cm_info.ird = iparams->ird; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->private_data, iparams->private_data_len); + + rc = qed_iwarp_mpa_offload(p_hwfn, ep); + if (rc) + qed_iwarp_modify_qp(p_hwfn, + iparams->qp, QED_IWARP_QP_STATE_ERROR, 1); + + return rc; +} + +int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + + ep = iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid); + + ep->cb_context = iparams->cb_context; + ep->qp = NULL; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->private_data, iparams->private_data_len); + + return qed_iwarp_mpa_offload(p_hwfn, ep); } static void @@ -526,6 +1311,38 @@ qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, return rc; } +static bool +qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) +{ + struct qed_iwarp_ep *ep = NULL; + bool found = false; + + list_for_each_entry(ep, + 
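/* Sketch of how an upper-layer user might answer the MPA request event
 * with the two entry points above; the callback context and variable
 * names are made up, only the qed_iwarp_* call and struct fields come
 * from this patch:
 *
 *	struct qed_iwarp_accept_in acc = {
 *		.ep_context	  = params->ep_context,
 *		.cb_context	  = my_ctx,
 *		.qp		  = my_qp,
 *		.private_data	  = pdata,
 *		.private_data_len = pdata_len,
 *		.ord		  = 1,
 *		.ird		  = 1,
 *	};
 *
 *	rc = qed_iwarp_accept(rdma_cxt, &acc);
 */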
&p_hwfn->p_rdma_info->iwarp.ep_list, + list_entry) { + if ((ep->cm_info.local_port == cm_info->local_port) && + (ep->cm_info.remote_port == cm_info->remote_port) && + (ep->cm_info.vlan == cm_info->vlan) && + !memcmp(&ep->cm_info.local_ip, cm_info->local_ip, + sizeof(cm_info->local_ip)) && + !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip, + sizeof(cm_info->remote_ip))) { + found = true; + break; + } + } + + if (found) { + DP_NOTICE(p_hwfn, + "SYN received on active connection - dropping\n"); + qed_iwarp_print_cm_info(p_hwfn, cm_info); + + return true; + } + + return false; +} + static struct qed_iwarp_listener * qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) @@ -596,9 +1413,8 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); - memcpy(remote_mac_addr, ethh->h_source, ETH_ALEN); - - memcpy(local_mac_addr, ethh->h_dest, ETH_ALEN); + ether_addr_copy(remote_mac_addr, ethh->h_source); + ether_addr_copy(local_mac_addr, ethh->h_dest); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n", eth_type, ethh->h_source); @@ -661,9 +1477,12 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) struct qed_hwfn *p_hwfn = cxt; u8 remote_mac_addr[ETH_ALEN]; u8 local_mac_addr[ETH_ALEN]; + struct qed_iwarp_ep *ep; int tcp_start_offset; + u8 ts_hdr_size = 0; u8 ll2_syn_handle; int payload_len; + u32 hdr_size; int rc; memset(&cm_info, 0, sizeof(cm_info)); @@ -719,6 +1538,49 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) } DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n"); + /* There may be an open ep on this connection if this is a syn + * retransmit... need to make sure there isn't... + */ + if (qed_iwarp_ep_exists(p_hwfn, &cm_info)) + goto err; + + ep = qed_iwarp_get_free_ep(p_hwfn); + if (!ep) + goto err; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ether_addr_copy(ep->remote_mac_addr, remote_mac_addr); + ether_addr_copy(ep->local_mac_addr, local_mac_addr); + + memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); + + if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) + ts_hdr_size = TIMESTAMP_HEADER_SIZE; + + hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ?
40 : 60) + + ts_hdr_size; + ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; + ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); + + ep->event_cb = listener->event_cb; + ep->cb_context = listener->cb_context; + ep->connect_mode = TCP_CONNECT_PASSIVE; + + ep->syn = buf; + ep->syn_ip_payload_length = (u16)payload_len; + ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset + + tcp_start_offset; + + rc = qed_iwarp_tcp_offload(p_hwfn, ep); + if (rc) { + qed_iwarp_return_ep(p_hwfn, ep); + goto err; + } + + return; err: qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle); } @@ -905,7 +1767,12 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; + iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND | + MPA_RTR_TYPE_ZERO_WRITE | + MPA_RTR_TYPE_ZERO_READ; + spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list); INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list); qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, @@ -918,6 +1785,7 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { int rc; + qed_iwarp_free_prealloc_ep(p_hwfn); rc = qed_iwarp_wait_for_all_cids(p_hwfn); if (rc) return rc; @@ -927,11 +1795,70 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return qed_iwarp_ll2_stop(p_hwfn, p_ptt); } +void +qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + /* Done with the SYN packet, post back to ll2 rx */ + qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, + p_hwfn->p_rdma_info->iwarp.ll2_syn_handle); + ep->syn = NULL; + + /* If connect failed - upper layer doesn't know about it */ + qed_iwarp_mpa_received(p_hwfn, ep); +} + +static inline bool +qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + if (!ep || (ep->sig != QED_EP_SIG)) { + DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep); + return false; + } + + return true; +} + static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u16 echo, union event_ring_data *data, u8 fw_return_code) { + struct regpair *fw_handle = &data->rdma_data.async_handle; + struct qed_iwarp_ep *ep = NULL; + u16 cid; + + ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, + fw_handle->lo); + + switch (fw_event_code) { + case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE: + /* Async completion after TCP 3-way handshake */ + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n", + ep->tcp_cid, fw_return_code); + qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED: + cid = (u16)le32_to_cpu(fw_handle->lo); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid); + qed_iwarp_cid_cleaned(p_hwfn, cid); + + break; + } return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 29005ac83a6a..bedac98c834c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -42,6 +42,8 @@ enum qed_iwarp_qp_state { 
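/* Worked example for the mss derivation in qed_iwarp_ll2_comp_syn_pkt
 * above, assuming max_mtu == 1500, IPv4 and timestamps disabled:
 * hdr_size = 40 (20 B IP + 20 B TCP), so
 *
 *	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, 1500 - 40);  // <= 1460
 *
 * An IPv6 SYN uses 60 instead of 40, and QED_IWARP_TS_EN adds
 * TIMESTAMP_HEADER_SIZE to hdr_size.
 */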
enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); +#define QED_IWARP_PREALLOC_CNT (256) + #define QED_IWARP_LL2_SYN_TX_SIZE (128) #define QED_IWARP_LL2_SYN_RX_SIZE (256) #define QED_IWARP_MAX_SYN_PKT_SIZE (128) @@ -55,6 +57,8 @@ struct qed_iwarp_ll2_buff { struct qed_iwarp_info { struct list_head listen_list; /* qed_iwarp_listener */ + struct list_head ep_list; /* qed_iwarp_ep */ + struct list_head ep_free_list; /* pre-allocated ep's */ spinlock_t iw_lock; /* for iwarp resources */ spinlock_t qp_lock; /* for teardown races */ u32 rcv_wnd_scale; @@ -68,6 +72,61 @@ struct qed_iwarp_info { enum mpa_rtr_type rtr_type; }; +enum qed_iwarp_ep_state { + QED_IWARP_EP_INIT, + QED_IWARP_EP_MPA_REQ_RCVD, + QED_IWARP_EP_MPA_OFFLOADED, + QED_IWARP_EP_ESTABLISHED, + QED_IWARP_EP_CLOSED +}; + +union async_output { + struct iwarp_eqe_data_mpa_async_completion mpa_response; + struct iwarp_eqe_data_tcp_async_completion mpa_request; +}; + +#define QED_MAX_PRIV_DATA_LEN (512) +struct qed_iwarp_ep_memory { + u8 in_pdata[QED_MAX_PRIV_DATA_LEN]; + u8 out_pdata[QED_MAX_PRIV_DATA_LEN]; + union async_output async_output; +}; + +/* Endpoint structure represents a TCP connection. This connection can be + * associated with a QP or not (in which case QP==NULL) + */ +struct qed_iwarp_ep { + struct list_head list_entry; + struct qed_rdma_qp *qp; + struct qed_iwarp_ep_memory *ep_buffer_virt; + dma_addr_t ep_buffer_phys; + enum qed_iwarp_ep_state state; + int sig; + struct qed_iwarp_cm_info cm_info; + enum tcp_connect_mode connect_mode; + enum mpa_rtr_type rtr_type; + enum mpa_negotiation_mode mpa_rev; + u32 tcp_cid; + u32 cid; + u16 mss; + u8 remote_mac_addr[6]; + u8 local_mac_addr[6]; + bool mpa_reply_processed; + + /* For Passive side - syn packet related data */ + u16 syn_ip_payload_length; + struct qed_iwarp_ll2_buff *syn; + dma_addr_t syn_phy_addr; + + /* The event_cb function is called for asynchronous events associated + * with the ep. It is initialized at different entry points depending + * on whether the ep is the tcp connection active side or passive side. + * The cb_context is passed to the event_cb function.
+ */ + iwarp_event_handler event_cb; + void *cb_context; +}; + struct qed_iwarp_listener { struct list_head list_entry; @@ -115,6 +174,9 @@ qed_iwarp_create_listen(void *rdma_cxt, struct qed_iwarp_listen_in *iparams, struct qed_iwarp_listen_out *oparams); +int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams); + +int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams); int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 27ea54ba7e1b..0ba5ec8a9814 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1227,19 +1227,6 @@ static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) return action; } -static void qed_set_fw_mac_addr(__le16 *fw_msb, - __le16 *fw_mid, - __le16 *fw_lsb, - u8 *mac) -{ - ((u8 *)fw_msb)[0] = mac[1]; - ((u8 *)fw_msb)[1] = mac[0]; - ((u8 *)fw_mid)[0] = mac[3]; - ((u8 *)fw_mid)[1] = mac[2]; - ((u8 *)fw_lsb)[0] = mac[5]; - ((u8 *)fw_lsb)[1] = mac[4]; -} - static int qed_filter_ucast_common(struct qed_hwfn *p_hwfn, u16 opaque_fid, diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 90e4e0f13727..18ec9cbd84f5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -85,6 +85,7 @@ struct qed_rdma_info { struct qed_bmap qp_map; struct qed_bmap srq_map; struct qed_bmap cid_map; + struct qed_bmap tcp_cid_map; struct qed_bmap real_cid_map; struct qed_bmap dpi_map; struct qed_bmap toggle_bits; @@ -167,6 +168,7 @@ struct qed_rdma_qp { void *shared_queue; dma_addr_t shared_queue_phys_addr; + struct qed_iwarp_ep *ep; }; #if IS_ENABLED(CONFIG_QED_RDMA) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index c3752c56e54d..ab4ad8a1e2a5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -111,6 +111,8 @@ union ramrod_data { struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; struct rdma_srq_modify_ramrod_data rdma_modify_srq; struct iwarp_create_qp_ramrod_data iwarp_create_qp; + struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload; + struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload; struct iwarp_modify_qp_ramrod_data iwarp_modify_qp; struct iwarp_init_func_ramrod_data iwarp_init_func; struct fcoe_init_ramrod_params fcoe_init; diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 885ae1379b5a..39e2a2ac2471 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -38,6 +38,8 @@ #include /* dma_addr_t manip */ +#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) +#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_REGPAIR_LE(x, val) do { \ diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 28df5688ad0c..c4c241fe2579 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -471,7 +471,8 @@ struct qed_rdma_counters_out_params { #define QED_ROCE_TX_FRAG_FAILURE (2) enum qed_iwarp_event_type { - QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */ + QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */ + QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */ }; enum qed_tcp_ip_version { @@ 
-516,6 +517,23 @@ struct qed_iwarp_listen_out { void *handle; }; +struct qed_iwarp_accept_in { + void *ep_context; + void *cb_context; + struct qed_rdma_qp *qp; + const void *private_data; + u16 private_data_len; + u8 ord; + u8 ird; +}; + +struct qed_iwarp_reject_in { + void *ep_context; + void *cb_context; + const void *private_data; + u16 private_data_len; +}; + struct qed_roce_ll2_header { void *vaddr; dma_addr_t baddr; @@ -626,6 +644,12 @@ struct qed_rdma_ops { struct qed_iwarp_listen_in *iparams, struct qed_iwarp_listen_out *oparams); + int (*iwarp_accept)(void *rdma_cxt, + struct qed_iwarp_accept_in *iparams); + + int (*iwarp_reject)(void *rdma_cxt, + struct qed_iwarp_reject_in *iparams); + int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle); }; -- cgit v1.2.3 From 4b0fdd7c8b757125ac7996617d914bbdb9e0348c Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Sun, 2 Jul 2017 10:29:28 +0300 Subject: qed: iWARP CM add active side connect This patch implements the active side connect. Offload a connection, process MPA reply and send RTR. In some of the common passive/active functions, the active side will work in blocking mode. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 240 ++++++++++++++++++++++++++-- drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 7 + drivers/net/ethernet/qlogic/qed/qed_rdma.c | 4 + include/linux/qed/qed_rdma_if.h | 26 +++ 4 files changed, 265 insertions(+), 12 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index a6dadae1f998..a5da9fc6f454 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -611,7 +611,10 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) memset(&init_data, 0, sizeof(init_data)); init_data.cid = ep->tcp_cid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_CB; + if (ep->connect_mode == TCP_CONNECT_PASSIVE) + init_data.comp_mode = QED_SPQ_MODE_CB; + else + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, IWARP_RAMROD_CMD_ID_TCP_OFFLOAD, @@ -711,7 +714,7 @@ qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "private_data_len=%x handshake_mode=%x private_data=(%x)\n", async_data->mpa_request.ulp_data_len, - mpa_rev, *((u32 *)((u8 *)ep->ep_buffer_virt->in_pdata))); + mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata))); if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { /* Read ord/ird values from private data buffer */ @@ -801,7 +804,10 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) init_data.cid = reject ? 
ep->tcp_cid : qp->icid; init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; - init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + if (ep->connect_mode == TCP_CONNECT_ACTIVE) + init_data.comp_mode = QED_SPQ_MODE_CB; + else + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, @@ -890,6 +896,59 @@ qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); } +void +qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct mpa_v2_hdr *mpa_v2_params; + union async_output *async_data; + u16 mpa_ird, mpa_ord; + u8 mpa_data_size = 0; + + if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { + mpa_v2_params = + (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata); + mpa_data_size = sizeof(*mpa_v2_params); + mpa_ird = ntohs(mpa_v2_params->ird); + mpa_ord = ntohs(mpa_v2_params->ord); + + ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); + ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); + } + async_data = &ep->ep_buffer_virt->async_output; + + ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; + ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len - + mpa_data_size; +} + +void +qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_cm_event_params params; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + DP_NOTICE(p_hwfn, + "MPA reply event not expected on passive side!\n"); + return; + } + + params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY; + + qed_iwarp_parse_private_data(p_hwfn, ep); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", + ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); + + params.cm_info = &ep->cm_info; + params.ep_context = ep; + params.status = 0; + + ep->mpa_reply_processed = true; + + ep->event_cb(ep->cb_context, &params); +} + #define QED_IWARP_CONNECT_MODE_STRING(ep) \ ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ?
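/* The comp_mode selection above (and the matching hunk in
 * qed_iwarp_tcp_offload) encodes which context issues each ramrod; a
 * condensed view inferred from the two hunks, not stated in the patch:
 *
 *			TCP offload		MPA offload
 *	active side	EBLOCK (connect(),	CB (driven from the async
 *			may sleep)		connect-complete path)
 *	passive side	CB (driven from the	EBLOCK (accept(),
 *			SYN rx completion)	may sleep)
 */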
"Passive" : "Active" @@ -902,7 +961,13 @@ qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn, { struct qed_iwarp_cm_event_params params; - params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; + if (ep->connect_mode == TCP_CONNECT_ACTIVE) + params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; + else + params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; + + if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed) + qed_iwarp_parse_private_data(p_hwfn, ep); DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", @@ -977,6 +1042,102 @@ qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn, } } +int qed_iwarp_connect(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_info *iwarp_info; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + u8 ts_hdr_size = 0; + u32 cid; + int rc; + + if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) || + (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) { + DP_NOTICE(p_hwfn, + "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", + iparams->qp->icid, iparams->cm_info.ord, + iparams->cm_info.ird); + + return -EINVAL; + } + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + + /* Allocate ep object */ + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + return rc; + + rc = qed_iwarp_create_ep(p_hwfn, &ep); + if (rc) + goto err; + + ep->tcp_cid = cid; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep->qp = iparams->qp; + ep->qp->ep = ep; + ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr); + ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr); + memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info)); + + ep->cm_info.ord = iparams->cm_info.ord; + ep->cm_info.ird = iparams->cm_info.ird; + + ep->rtr_type = iwarp_info->rtr_type; + if (!iwarp_info->peer2peer) + ep->rtr_type = MPA_RTR_TYPE_NONE; + + if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0)) + ep->cm_info.ord = 1; + + ep->mpa_rev = iwarp_info->mpa_rev; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->cm_info.private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->cm_info.private_data, + iparams->cm_info.private_data_len); + + if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN) + ts_hdr_size = TIMESTAMP_HEADER_SIZE; + + ep->mss = iparams->mss - ts_hdr_size; + ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); + + ep->event_cb = iparams->event_cb; + ep->cb_context = iparams->cb_context; + ep->connect_mode = TCP_CONNECT_ACTIVE; + + oparams->ep_context = ep; + + rc = qed_iwarp_tcp_offload(p_hwfn, ep); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n", + iparams->qp->icid, ep->tcp_cid, rc); + + if (rc) { + qed_iwarp_destroy_ep(p_hwfn, ep, true); + goto err; + } + + return rc; +err: + qed_iwarp_cid_cleaned(p_hwfn, cid); + + return rc; +} + static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn) { struct qed_iwarp_ep *ep = NULL; @@ -1174,12 +1335,12 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) { - struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_hwfn *p_hwfn = rdma_cxt; struct qed_iwarp_ep *ep; u8 mpa_data_size = 
0; int rc; - ep = (struct qed_iwarp_ep *)iparams->ep_context; + ep = iparams->ep_context; if (!ep) { DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n"); return -EINVAL; @@ -1799,13 +1960,19 @@ void qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) { - /* Done with the SYN packet, post back to ll2 rx */ - qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, - p_hwfn->p_rdma_info->iwarp.ll2_syn_handle); - ep->syn = NULL; + u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + /* Done with the SYN packet, post back to ll2 rx */ + qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle); - /* If connect failed - upper layer doesn't know about it */ - qed_iwarp_mpa_received(p_hwfn, ep); + ep->syn = NULL; + + /* If connect failed - upper layer doesn't know about it */ + qed_iwarp_mpa_received(p_hwfn, ep); + } else { + qed_iwarp_mpa_offload(p_hwfn, ep); + } } static inline bool @@ -1842,6 +2009,16 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, ep->tcp_cid, fw_return_code); qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); break; + /* Async event for active side only */ + case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_mpa_reply_arrived(p_hwfn, ep); + break; case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) return -EINVAL; @@ -1918,6 +2095,45 @@ int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle) return 0; } +int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + struct qed_iwarp_ep *ep; + struct qed_rdma_qp *qp; + int rc; + + ep = iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n"); + return -EINVAL; + } + + qp = ep->qp; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", + qp->icid, ep->tcp_cid); + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, + PROTOCOLID_IWARP, &init_data); + + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc); + + return rc; +} + void qed_iwarp_query_qp(struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *out_params) diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index bedac98c834c..148ef3c33a5d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -169,6 +169,11 @@ int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); void qed_iwarp_query_qp(struct qed_rdma_qp *qp, struct qed_rdma_query_qp_out_params *out_params); +int +qed_iwarp_connect(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams); + int qed_iwarp_create_listen(void *rdma_cxt, struct qed_iwarp_listen_in *iparams, @@ -179,4 +184,6 @@ int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams); int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams); int 
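/* Sketch of driving the new RTR ramrod from an upper layer once the MPA
 * handshake completes; when to send it is an assumption here, only
 * qed_iwarp_send_rtr() and its input struct come from this patch:
 *
 *	struct qed_iwarp_send_rtr_in rtr_in = { .ep_context = ep };
 *
 *	rc = qed_iwarp_send_rtr(rdma_cxt, &rtr_in);
 */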
qed_iwarp_destroy_listen(void *rdma_cxt, void *handle); +int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 29de915007a0..6fb99518a61f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1772,8 +1772,12 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = { .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, .ll2_get_stats = &qed_ll2_get_stats, + .iwarp_connect = &qed_iwarp_connect, .iwarp_create_listen = &qed_iwarp_create_listen, .iwarp_destroy_listen = &qed_iwarp_destroy_listen, + .iwarp_accept = &qed_iwarp_accept, + .iwarp_reject = &qed_iwarp_reject, + .iwarp_send_rtr = &qed_iwarp_send_rtr, }; const struct qed_rdma_ops *qed_get_rdma_ops(void) diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index c4c241fe2579..e9514a69b03f 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -473,6 +473,8 @@ struct qed_rdma_counters_out_params { enum qed_iwarp_event_type { QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */ QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */ + QED_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */ + QED_IWARP_EVENT_ACTIVE_MPA_REPLY, }; enum qed_tcp_ip_version { @@ -503,6 +505,20 @@ struct qed_iwarp_cm_event_params { typedef int (*iwarp_event_handler) (void *context, struct qed_iwarp_cm_event_params *event); +struct qed_iwarp_connect_in { + iwarp_event_handler event_cb; + void *cb_context; + struct qed_rdma_qp *qp; + struct qed_iwarp_cm_info cm_info; + u16 mss; + u8 remote_mac_addr[ETH_ALEN]; + u8 local_mac_addr[ETH_ALEN]; +}; + +struct qed_iwarp_connect_out { + void *ep_context; +}; + struct qed_iwarp_listen_in { iwarp_event_handler event_cb; void *cb_context; /* passed to event_cb */ @@ -534,6 +550,10 @@ struct qed_iwarp_reject_in { u16 private_data_len; }; +struct qed_iwarp_send_rtr_in { + void *ep_context; +}; + struct qed_roce_ll2_header { void *vaddr; dma_addr_t baddr; @@ -640,6 +660,10 @@ struct qed_rdma_ops { int (*ll2_set_mac_filter)(struct qed_dev *cdev, u8 *old_mac_address, u8 *new_mac_address); + int (*iwarp_connect)(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams); + int (*iwarp_create_listen)(void *rdma_cxt, struct qed_iwarp_listen_in *iparams, struct qed_iwarp_listen_out *oparams); @@ -652,6 +676,8 @@ struct qed_rdma_ops { int (*iwarp_destroy_listen)(void *rdma_cxt, void *handle); + int (*iwarp_send_rtr)(void *rdma_cxt, + struct qed_iwarp_send_rtr_in *iparams); }; const struct qed_rdma_ops *qed_get_rdma_ops(void); -- cgit v1.2.3 From fc4c6065e661224df3db50780219ac53fee56e2b Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Sun, 2 Jul 2017 10:29:29 +0300 Subject: qed: iWARP implement disconnect flows This patch takes care of active/passive disconnect flows. Disconnect flows can be initiated remotely, in which case an async event will arrive from the peer and be indicated to the qedr driver. These are referred to as exceptions. When a QP is destroyed, it needs to check that its associated ep has been closed. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 90 ++++++++++++++++++++++++++++- include/linux/qed/qed_rdma_if.h | 2 + 2 files changed, 91 insertions(+), 1 deletion(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index a5da9fc6f454..84bcda314cd2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -496,6 +496,8 @@ static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn, int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) { + struct qed_iwarp_ep *ep = qp->ep; + int wait_count = 0; int rc = 0; if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) { @@ -505,6 +507,18 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return rc; } + /* Make sure ep is closed before returning and freeing memory. */ + if (ep) { + while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200) + msleep(100); + + if (ep->state != QED_IWARP_EP_CLOSED) + DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n", + ep->state); + + qed_iwarp_destroy_ep(p_hwfn, ep, false); + } + rc = qed_iwarp_fw_destroy(p_hwfn, qp); if (qp->shared_queue) @@ -1956,6 +1970,61 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) return qed_iwarp_ll2_stop(p_hwfn, p_ptt); } +void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true); + + params.event = QED_IWARP_EVENT_CLOSE; + params.ep_context = ep; + params.cm_info = &ep->cm_info; + params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ? + 0 : -ECONNRESET; + + ep->state = QED_IWARP_EP_CLOSED; + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep->event_cb(ep->cb_context, &params); +} + +void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, int fw_ret_code) +{ + struct qed_iwarp_cm_event_params params; + bool event_cb = false; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n", + ep->cid, fw_ret_code); + + switch (fw_ret_code) { + case IWARP_EXCEPTION_DETECTED_LLP_CLOSED: + params.status = 0; + params.event = QED_IWARP_EVENT_DISCONNECT; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LLP_RESET: + params.status = -ECONNRESET; + params.event = QED_IWARP_EVENT_DISCONNECT; + event_cb = true; + break; + default: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Unhandled exception received...fw_ret_code=%d\n", + fw_ret_code); + break; + } + + if (event_cb) { + params.ep_context = ep; + params.cm_info = &ep->cm_info; + ep->event_cb(ep->cb_context, &params); + } +} + void qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) @@ -2009,8 +2078,27 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, ep->tcp_cid, fw_return_code); qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); break; - /* Async event for active side only */ + case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED: + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_exception_received(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE: + /* Async completion for Close Connection ramrod */ + if
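/* A sketch of the upper-layer half of the two flows above; the callback
 * body and helper names are hypothetical, the event values and status
 * codes are the ones set in qed_iwarp_exception_received() and
 * qed_iwarp_qp_in_error():
 *
 *	static int my_event_cb(void *ctx,
 *			       struct qed_iwarp_cm_event_params *params)
 *	{
 *		switch (params->event) {
 *		case QED_IWARP_EVENT_DISCONNECT:
 *			// 0 == orderly LLP close, -ECONNRESET == LLP reset
 *			my_begin_teardown(ctx, params->status);
 *			break;
 *		case QED_IWARP_EVENT_CLOSE:
 *			// QP already moved to ERROR; safe to destroy it now
 *			my_finish_close(ctx, params->status);
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 */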
(!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code); + break; case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: + /* Async event for active side only */ if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) return -EINVAL; DP_VERBOSE(p_hwfn, diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index e9514a69b03f..01966c3a3e5d 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -474,6 +474,8 @@ enum qed_iwarp_event_type { QED_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */ QED_IWARP_EVENT_PASSIVE_COMPLETE, /* ack on mpa response */ QED_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */ + QED_IWARP_EVENT_DISCONNECT, + QED_IWARP_EVENT_CLOSE, QED_IWARP_EVENT_ACTIVE_MPA_REPLY, }; -- cgit v1.2.3 From 9816b614346925feac1198e33d2dc5201c4ef74e Mon Sep 17 00:00:00 2001 From: "Kalderon, Michal" Date: Sun, 2 Jul 2017 10:29:30 +0300 Subject: qed: iWARP CM add error handling This patch introduces error handling for errors that occurred during connection establishment. Signed-off-by: Michal Kalderon Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 183 +++++++++++++++++++++++++++- include/linux/qed/qed_rdma_if.h | 9 ++ 2 files changed, 190 insertions(+), 2 deletions(-) (limited to 'include/linux/qed') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 84bcda314cd2..5cd20da2d4e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1001,12 +1001,75 @@ qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn, ep->state = QED_IWARP_EP_ESTABLISHED; params.status = 0; break; + case IWARP_CONN_ERROR_MPA_TIMEOUT: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -EBUSY; + break; + case IWARP_CONN_ERROR_MPA_ERROR_REJECT: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_RST: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid, + ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_MPA_FIN: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INSUF_IRD: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_RTR_MISMATCH: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INVALID_PACKET: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_LOCAL_ERROR: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_TERMINATE: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; 
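/* Condensed view of the errno mapping the switch above establishes; the
 * helper name is illustrative, the codes and values are the ones in the
 * cases shown:
 *
 *	static int example_mpa_fw_err_to_errno(u8 fw_return_code)
 *	{
 *		switch (fw_return_code) {
 *		case IWARP_CONN_ERROR_MPA_TIMEOUT:
 *			return -EBUSY;
 *		case IWARP_CONN_ERROR_MPA_RST:
 *			return -ECONNRESET;
 *		case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
 *		case IWARP_CONN_ERROR_MPA_FIN:
 *		case IWARP_CONN_ERROR_MPA_INSUF_IRD:
 *		case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
 *		case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
 *		case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
 *		case IWARP_CONN_ERROR_MPA_TERMINATE:
 *			return -ECONNREFUSED;
 *		default:
 *			return -ECONNRESET;
 *		}
 *	}
 */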
default: params.status = -ECONNRESET; break; } ep->event_cb(ep->cb_context, &params); + + /* on the passive side, if there is no associated QP (reject), we need + * to return the ep to the pool (in the regular case an element is + * added in accept instead of here). + * In both cases we need to remove it from the ep_list. + */ + if (fw_return_code != RDMA_RETURN_OK) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && + (!ep->qp)) { /* Rejected */ + qed_iwarp_return_ep(p_hwfn, ep); + } else { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + } } static void @@ -2011,6 +2074,42 @@ void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, params.event = QED_IWARP_EVENT_DISCONNECT; event_cb = true; break; + case IWARP_EXCEPTION_DETECTED_RQ_EMPTY: + params.event = QED_IWARP_EVENT_RQ_EMPTY; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_IRQ_FULL: + params.event = QED_IWARP_EVENT_IRQ_FULL; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT: + params.event = QED_IWARP_EVENT_LLP_TIMEOUT; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR: + params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW: + params.event = QED_IWARP_EVENT_CQ_OVERFLOW; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC: + params.event = QED_IWARP_EVENT_QP_CATASTROPHIC; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR: + params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR: + params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED: + params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED; + event_cb = true; + break; default: DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Unhandled exception received...fw_ret_code=%d\n", @@ -2025,6 +2124,66 @@ void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, } } +static void +qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + memset(&params, 0, sizeof(params)); + params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; + params.ep_context = ep; + params.cm_info = &ep->cm_info; + ep->state = QED_IWARP_EP_CLOSED; + + switch (fw_return_code) { + case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "%s(0x%x) TCP connect got invalid packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_TCP_CONNECTION_RST: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "%s(0x%x) TCP Connection Reset\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT: + DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -EBUSY; + break; + case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INVALID_PACKET: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + default: +
DP_ERR(p_hwfn, + "%s(0x%x) Unexpected return code tcp connect: %d\n", + QED_IWARP_CONNECT_MODE_STRING(ep), + ep->tcp_cid, fw_return_code); + params.status = -ECONNRESET; + break; + } + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + qed_iwarp_return_ep(p_hwfn, ep); + } else { + ep->event_cb(ep->cb_context, &params); + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } +} + void qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep, u8 fw_return_code) @@ -2038,9 +2197,17 @@ qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, ep->syn = NULL; /* If connect failed - upper layer doesn't know about it */ - qed_iwarp_mpa_received(p_hwfn, ep); + if (fw_return_code == RDMA_RETURN_OK) + qed_iwarp_mpa_received(p_hwfn, ep); + else + qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, + fw_return_code); } else { - qed_iwarp_mpa_offload(p_hwfn, ep); + if (fw_return_code == RDMA_RETURN_OK) + qed_iwarp_mpa_offload(p_hwfn, ep); + else + qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, + fw_return_code); } } @@ -2123,6 +2290,18 @@ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, qed_iwarp_cid_cleaned(p_hwfn, cid); break; + case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); + + p_hwfn->p_rdma_info->events.affiliated_event( + p_hwfn->p_rdma_info->events.context, + QED_IWARP_EVENT_CQ_OVERFLOW, + (void *)fw_handle); + break; + default: + DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n", + fw_event_code); + return -EINVAL; } return 0; } diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 01966c3a3e5d..4dd72ba210f5 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -476,7 +476,16 @@ enum qed_iwarp_event_type { QED_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */ QED_IWARP_EVENT_DISCONNECT, QED_IWARP_EVENT_CLOSE, + QED_IWARP_EVENT_IRQ_FULL, + QED_IWARP_EVENT_RQ_EMPTY, + QED_IWARP_EVENT_LLP_TIMEOUT, + QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR, + QED_IWARP_EVENT_CQ_OVERFLOW, + QED_IWARP_EVENT_QP_CATASTROPHIC, QED_IWARP_EVENT_ACTIVE_MPA_REPLY, + QED_IWARP_EVENT_LOCAL_ACCESS_ERROR, + QED_IWARP_EVENT_REMOTE_OPERATION_ERROR, + QED_IWARP_EVENT_TERMINATE_RECEIVED }; enum qed_tcp_ip_version { -- cgit v1.2.3