author    David S. Miller <davem@davemloft.net>  2015-05-31 04:25:03 +0300
committer David S. Miller <davem@davemloft.net>  2015-05-31 04:25:03 +0300
commit    1dcf3ac49fb0079ed7d3d3f4a4cf25e9fb9fffc6 (patch)
tree      64a8da331114ff1b10ddd2deeacb7e2f7aa98af0 /include/linux/mlx5/device.h
parent    8ed9b5e1c8f3cfc0d8c94f1a19d1167422eea7a8 (diff)
parent    f62b8bb8f2d30582f30f51e85a8c0e1260125d7e (diff)
download  linux-1dcf3ac49fb0079ed7d3d3f4a4cf25e9fb9fffc6.tar.xz
Merge branch 'mlx5-next'
Amir Vadai says:

====================
net/mlx5: ConnectX-4 100G Ethernet driver

This patchset extends the mlx5_core driver to support Ethernet functionality. The
Ethernet functionality in the mlx5 driver is integrated into the core driver and not
as a separate driver. The IB functionality remains in the mlx5_ib driver as before.

This functionality will enable the Ethernet capability of Mellanox's new family of
cards - ConnectX-4. Because backward compatibility is kept, existing Connect-IB cards
that use this driver work fully with the modified driver, and no issues with current
deployments should be seen.

Like the ConnectX-3 cards, ConnectX-4 is a VPI (Virtual Port Interface - every port
can be configured as InfiniBand or Ethernet) card. Unlike previous generations,
ConnectX-4 has a separate PCI function per port.

The current code has a limitation that InfiniBand and Ethernet port types are
mutually exclusive. When the driver is compiled with Ethernet support, the InfiniBand
functionality is disabled and vice versa. To control that, we added the
CONFIG_MLX5_CORE_EN config directive, which is 'n' by default but can be changed by
the user. This limitation is short-lived and will be addressed soon.

As part of this patchset, mlx5_ifc.h was heavily modified [1]. This file is now
generated automatically from the device specification document. Since this patch is
too big for the mail server, it might be missing in the mailing list, but it can be
pulled from an external git repository [2].

irq name selection is done at driver initialization and doesn't contain the interface
name as part of the irq name. irq_balancer will still work thanks to an improvement
introduced by Neil Horman [3] to use sysfs instead of /proc/interrupts.

Patchset was applied on top of commit ed2dfd9 ("tcp/dccp: warn user for preferred
ip_local_port_range")

[1] - Patch 4/11 ("net/mlx5_core: HW data structs/types definitions preparation for
      mlx5 ehternet driver")
[2] - http://git.openfabrics.org/?p=~amirv/linux.git;a=shortlog;h=refs/heads/mlx5e_v1
[3] - kernel: da8d1c8 PCI/sysfs: add per pci device msi[x] irq listing (v5)
      irq_balancer: 32a7757 Complete rework of how we detect and classify irqs

Thanks to Achiad, Saeed, Yevheny, Or and the whole team for making this happen,

Amir

Changes from V4:
- Removed Patch 3/12: net/mlx5_core: Add EQ renaming mechanism
- Patch 12/12: net/mlx5: Extend mlx5_core to support ConnectX-4 Ethernet functionality
  - irq name is created on driver initialization, therefore it won't contain the
    network interface name in it. This won't affect irq_balancer thanks to patches
    introduced by Neil Horman to use sysfs instead of /proc/interrupts.

Changes from V3:
- Patch 8/11 ("net/mlx5_core: Set/Query port MTU commands")
  - Return value directly - no need for err.

Changes from V2:
- Improved changelogs and cover-letter
- Added CONFIG_MLX5_EN to disable/enable the Ethernet functionality
- Moved en.h and wq.[ch] into the patch with data-path related code

Changes from V1:
- Added patch 1/12 ("net/mlx5_core,mlx5_ib: Do not use vmap() on coherent memory")

Changes from V0:
- Removed V0 Patch 1/11 ("net/mlx5_core: Virtually extend work/completion queue
  buffers by one page") due to misuse of DMA API. Thanks Dave.
- Patch 1/11 ("net/mlx5_core: Set irq affinity hints"):
  - Use kcalloc instead of kzalloc
  - Fix build error when CONFIG_CPUMASK_OFFSTACK=n. Driver loading will now fail if
    cpumask allocation fails.
  - Use the dev_to_node helper. Thanks, Ido.
- Patch 3/11 ("net/mlx5_core: HW data structs/types definitions preparation for mlx5
  ehternet driver")
  - Removed Mellanox internal comment at the head of the file. Thanks Joe.
- Patch 6/11 ("net/mlx5_core: Implement get/set port status")
  - Use direct return of function's result. Thanks Sergei.
- Added Patch 8/11 ("net/mlx5_core: Set/Query port MTU commands")
- Patch 9/11 ("net/mlx5: Ethernet Datapath files")
  - Use rq->wqe_sz instead of skb_end_offset. Thanks Ido.
  - Use dma_wmb() when possible instead of wmb(). Thanks Alex.
  - Fix checkpatch issues
- Patch 10/11 ("net/mlx5: ethernet resources handling")
  - checkpatch issues
  - Added missing include
- Patch 11/11 ("net/mlx5: Ethernet driver")
  - checkpatch issues
  - fixed typo
  - Modified use of affinity hint
    - Use the dev_to_node helper. Thanks, Ido.
  - Use new hardware commands from Patch 8/11 ("net/mlx5_core: Set/Query port MTU
    commands") to get/set port MTU in hardware.
  - Removed NETIF_F_SG since hardware ring wraparound is not supported
  - Use dma_wmb() when possible instead of wmb(). Thanks Alex.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
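As an illustration of the mutual exclusivity controlled by CONFIG_MLX5_CORE_EN
described above, here is a minimal, self-contained user-space sketch of how such a
compile-time switch typically selects one code path and compiles out the other. The
function names and the stand-in define are hypothetical and are not taken from the
patchset; only the config symbol name comes from the cover letter.

/* Hedged sketch: CONFIG_MLX5_CORE_EN-style build-time selection.
 * All identifiers below are illustrative placeholders, not driver code. */
#include <stdio.h>

/* Stand-in for CONFIG_MLX5_CORE_EN=y coming from the kernel .config. */
#define CONFIG_MLX5_CORE_EN 1

static int register_eth_interface(void) { puts("mlx5: Ethernet path");   return 0; }
static int register_ib_interface(void)  { puts("mlx5: InfiniBand path"); return 0; }

static int register_port_interfaces(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	/* Ethernet support compiled in: the InfiniBand path is not taken. */
	return register_eth_interface();
#else
	/* Default ('n'): keep the existing InfiniBand-only behaviour. */
	return register_ib_interface();
#endif
}

int main(void)
{
	return register_port_interfaces();
}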
Diffstat (limited to 'include/linux/mlx5/device.h')
-rw-r--r--  include/linux/mlx5/device.h  |  198
1 file changed, 187 insertions, 11 deletions
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index abf65c790421..b288c538347a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -35,6 +35,7 @@
#include <linux/types.h>
#include <rdma/ib_verbs.h>
+#include <linux/mlx5/mlx5_ifc.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -58,6 +59,8 @@
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
@@ -70,6 +73,14 @@
<< __mlx5_dw_bit_off(typ, fld))); \
} while (0)
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))
@@ -264,6 +275,7 @@ enum {
MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
MLX5_OPCODE_SEND = 0x0a,
MLX5_OPCODE_SEND_IMM = 0x0b,
+ MLX5_OPCODE_LSO = 0x0e,
MLX5_OPCODE_RDMA_READ = 0x10,
MLX5_OPCODE_ATOMIC_CS = 0x11,
MLX5_OPCODE_ATOMIC_FA = 0x12,
@@ -312,13 +324,6 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
-enum {
- HCA_CAP_OPMOD_GET_MAX = 0,
- HCA_CAP_OPMOD_GET_CUR = 1,
- HCA_CAP_OPMOD_GET_ODP_MAX = 4,
- HCA_CAP_OPMOD_GET_ODP_CUR = 5
-};
-
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -541,6 +546,10 @@ struct mlx5_cmd_prot_block {
u8 sig;
};
+enum {
+ MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
struct mlx5_err_cqe {
u8 rsvd0[32];
__be32 srqn;
@@ -554,13 +563,22 @@ struct mlx5_err_cqe {
};
struct mlx5_cqe64 {
- u8 rsvd0[17];
+ u8 rsvd0[4];
+ u8 lro_tcppsh_abort_dupack;
+ u8 lro_min_ttl;
+ __be16 lro_tcp_win;
+ __be32 lro_ack_seq_num;
+ __be32 rss_hash_result;
+ u8 rss_hash_type;
u8 ml_path;
- u8 rsvd20[4];
+ u8 rsvd20[2];
+ __be16 check_sum;
__be16 slid;
__be32 flags_rqpn;
- u8 rsvd28[4];
- __be32 srqn;
+ u8 hds_ip_ext;
+ u8 l4_hdr_type_etc;
+ __be16 vlan_info;
+ __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
__be32 imm_inval_pkey;
u8 rsvd40[4];
__be32 byte_cnt;
@@ -571,6 +589,40 @@ struct mlx5_cqe64 {
u8 op_own;
};
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+ return !!(cqe->l4_hdr_type_etc & 0x1);
+}
+
+enum {
+ CQE_L4_HDR_TYPE_NONE = 0x0,
+ CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
+ CQE_L4_HDR_TYPE_UDP = 0x2,
+ CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
+ CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
+};
+
+enum {
+ CQE_RSS_HTYPE_IP = 0x3 << 6,
+ CQE_RSS_HTYPE_L4 = 0x3 << 2,
+};
+
+enum {
+ CQE_L2_OK = 1 << 0,
+ CQE_L3_OK = 1 << 1,
+ CQE_L4_OK = 1 << 2,
+};
+
struct mlx5_sig_err_cqe {
u8 rsvd0[16];
__be32 expected_trans_sig;
@@ -996,4 +1048,128 @@ struct mlx5_destroy_psv_out {
u8 rsvd[8];
};
+#define MLX5_CMD_OP_MAX 0x920
+
+enum {
+ VPORT_STATE_DOWN = 0x0,
+ VPORT_STATE_UP = 0x1,
+};
+
+enum {
+ MLX5_L3_PROT_TYPE_IPV4 = 0,
+ MLX5_L3_PROT_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_L4_PROT_TYPE_TCP = 0,
+ MLX5_L4_PROT_TYPE_UDP = 1,
+};
+
+enum {
+ MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
+ MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
+ MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
+ MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
+};
+
+enum {
+ MLX5_MATCH_OUTER_HEADERS = 1 << 0,
+ MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
+ MLX5_MATCH_INNER_HEADERS = 1 << 2,
+
+};
+
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
+ MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
+};
+
+enum {
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
+enum mlx5_cap_type {
+ MLX5_CAP_GENERAL = 0,
+ MLX5_CAP_ETHERNET_OFFLOADS,
+ MLX5_CAP_ODP,
+ MLX5_CAP_ATOMIC,
+ MLX5_CAP_ROCE,
+ MLX5_CAP_IPOIB_OFFLOADS,
+ MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_FLOW_TABLE,
+ /* NUM OF CAP Types */
+ MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
+enum {
+ MLX5_CMD_STAT_OK = 0x0,
+ MLX5_CMD_STAT_INT_ERR = 0x1,
+ MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
+ MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
+ MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
+ MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
+ MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_LIM_ERR = 0x8,
+ MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
+ MLX5_CMD_STAT_IX_ERR = 0xa,
+ MLX5_CMD_STAT_NO_RES_ERR = 0xf,
+ MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
+ MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
+ MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
+ MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
+ MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
+};
+
#endif /* MLX5_DEVICE_H */
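To show how the additions in this diff fit together, below is a minimal, hedged
sketch of RX completion handling that consumes the new CQE helpers
(cqe_has_vlan(), get_cqe_l4_hdr_type()), the extended struct mlx5_cqe64 fields
(vlan_info, rss_hash_result, rss_hash_type) and the MLX5_CAP_ETH() capability
macro. The function name mlx5e_handle_rx_cqe_example() is hypothetical, the
csum_cap field is assumed to exist in per_protocol_networking_offload_caps, and
the sk_buff plumbing uses standard kernel helpers; none of this is the code added
by the merge itself.

/* Hedged usage sketch for the CQE helpers and capability macros above. */
#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static void mlx5e_handle_rx_cqe_example(struct mlx5_core_dev *mdev,
					struct mlx5_cqe64 *cqe,
					struct sk_buff *skb)
{
	/* Strip the VLAN tag reported in the CQE, if one is present. */
	if (cqe_has_vlan(cqe))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(cqe->vlan_info));

	/* Report the RSS hash; rss_hash_type says whether L4 fields were used. */
	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result),
		     (cqe->rss_hash_type & CQE_RSS_HTYPE_L4) ?
		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);

	/* Trust checksum offload only when the device advertises it (csum_cap
	 * is an assumed capability bit) and the CQE carries a known L4 type. */
	if (MLX5_CAP_ETH(mdev, csum_cap) &&
	    get_cqe_l4_hdr_type(cqe) != CQE_L4_HDR_TYPE_NONE)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}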