Diffstat (limited to 'include/linux/mlx5/driver.h'):

 include/linux/mlx5/driver.h | 261 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 209 insertions(+), 52 deletions(-)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index ecc451d89ccd..2fcff6b4503f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -42,6 +42,7 @@
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
#include <linux/workqueue.h>
+#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/mlx5/device.h>
@@ -83,6 +84,7 @@ enum {
MLX5_EQ_VEC_PAGES = 0,
MLX5_EQ_VEC_CMD = 1,
MLX5_EQ_VEC_ASYNC = 2,
+ MLX5_EQ_VEC_PFAULT = 3,
MLX5_EQ_VEC_COMP_BASE,
};
@@ -104,6 +106,8 @@ enum {
enum {
MLX5_REG_QETCR = 0x4005,
MLX5_REG_QTCT = 0x400a,
+ MLX5_REG_DCBX_PARAM = 0x4020,
+ MLX5_REG_DCBX_APP = 0x4021,
MLX5_REG_PCAP = 0x5001,
MLX5_REG_PMTU = 0x5003,
MLX5_REG_PTYS = 0x5004,
@@ -117,10 +121,20 @@ enum {
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MPCNT = 0x9051,
+ MLX5_REG_MTPPS = 0x9053,
+ MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MCAM = 0x907f,
+};
+
+enum mlx5_dcbx_oper_mode {
+ MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
+ MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
enum {
@@ -163,6 +177,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_PKEY_CHANGE,
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
+ MLX5_DEV_EVENT_PPS,
};
enum mlx5_port_status {
@@ -170,36 +185,26 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-struct mlx5_uuar_info {
- struct mlx5_uar *uars;
- int num_uars;
- int num_low_latency_uuars;
- unsigned long *bitmap;
+enum mlx5_eq_type {
+ MLX5_EQ_TYPE_COMP,
+ MLX5_EQ_TYPE_ASYNC,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ MLX5_EQ_TYPE_PF,
+#endif
+};
+
+struct mlx5_bfreg_info {
+ u32 *sys_pages;
+ int num_low_latency_bfregs;
unsigned int *count;
- struct mlx5_bf *bfs;
/*
- * protect uuar allocation data structs
+ * protect bfreg allocation data structs
*/
struct mutex lock;
u32 ver;
-};
-
-struct mlx5_bf {
- void __iomem *reg;
- void __iomem *regreg;
- int buf_size;
- struct mlx5_uar *uar;
- unsigned long offset;
- int need_lock;
- /* protect blue flame buffer selection when needed
- */
- spinlock_t lock;
-
- /* serialize 64 bit writes when done as two 32 bit accesses
- */
- spinlock_t lock32;
- int uuarn;
+ bool lib_uar_4k;
+ u32 num_sys_pages;
};
struct mlx5_cmd_first {
@@ -208,7 +213,7 @@ struct mlx5_cmd_first {
struct mlx5_cmd_msg {
struct list_head list;
- struct cache_ent *cache;
+ struct cmd_msg_cache *parent;
u32 len;
struct mlx5_cmd_first first;
struct mlx5_cmd_mailbox *next;
@@ -228,17 +233,17 @@ struct mlx5_cmd_debug {
u16 outlen;
};
-struct cache_ent {
+struct cmd_msg_cache {
/* protect block chain allocations
*/
spinlock_t lock;
struct list_head head;
+ unsigned int max_inbox_size;
+ unsigned int num_ent;
};
-struct cmd_msg_cache {
- struct cache_ent large;
- struct cache_ent med;
-
+enum {
+ MLX5_NUM_COMMAND_CACHES = 5,
};
struct mlx5_cmd_stats {
@@ -281,7 +286,7 @@ struct mlx5_cmd {
struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
struct pci_pool *pool;
struct mlx5_cmd_debug dbg;
- struct cmd_msg_cache cache;
+ struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
int checksum_disabled;
struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
};
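
The single command-message cache becomes an array of MLX5_NUM_COMMAND_CACHES size classes, each cmd_msg_cache advertising its own max_inbox_size. A minimal sketch, not part of this patch, of how a caller might pick a cache that fits a given mailbox (helper name and locking granularity are assumptions; the real logic lives in cmd.c):

/* Hedged sketch: return a cached message that fits in_size, or NULL so the
 * caller falls back to a fresh allocation. */
static struct mlx5_cmd_msg *pick_cached_msg(struct mlx5_cmd *cmd,
                                            unsigned int in_size)
{
        struct mlx5_cmd_msg *msg = NULL;
        struct cmd_msg_cache *ch;
        int i;

        for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
                ch = &cmd->cache[i];
                if (in_size > ch->max_inbox_size)
                        continue;
                spin_lock_irq(&ch->lock);
                msg = list_first_entry_or_null(&ch->head,
                                               struct mlx5_cmd_msg, list);
                if (msg)
                        list_del(&msg->list);
                spin_unlock_irq(&ch->lock);
                if (msg)
                        break;  /* otherwise try the next, larger class */
        }
        return msg;
}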
@@ -290,6 +295,7 @@ struct mlx5_port_caps {
int gid_table_len;
int pkey_table_len;
u8 ext_port_cap;
+ bool has_smi;
};
struct mlx5_cmd_mailbox {
@@ -310,6 +316,13 @@ struct mlx5_buf {
u8 page_shift;
};
+struct mlx5_frag_buf {
+ struct mlx5_buf_list *frags;
+ int npages;
+ int size;
+ u8 page_shift;
+};
+
struct mlx5_eq_tasklet {
struct list_head list;
struct list_head process_list;
@@ -318,6 +331,14 @@ struct mlx5_eq_tasklet {
spinlock_t lock;
};
+struct mlx5_eq_pagefault {
+ struct work_struct work;
+ /* Pagefaults lock */
+ spinlock_t lock;
+ struct workqueue_struct *wq;
+ mempool_t *pool;
+};
+
struct mlx5_eq {
struct mlx5_core_dev *dev;
__be32 __iomem *doorbell;
@@ -331,7 +352,13 @@ struct mlx5_eq {
struct list_head list;
int index;
struct mlx5_rsc_debug *dbg;
- struct mlx5_eq_tasklet tasklet_ctx;
+ enum mlx5_eq_type type;
+ union {
+ struct mlx5_eq_tasklet tasklet_ctx;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct mlx5_eq_pagefault pf_ctx;
+#endif
+ };
};
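
struct mlx5_eq now carries an explicit type, and the tasklet and page-fault contexts share a union selected by it. A rough, hedged sketch (function name assumed; the real EQE handling in eq.c is far more involved) of how a handler might branch on the type before touching the union:

/* Hedged sketch: only union members valid for eq->type may be touched. */
static void kick_eq_bottom_half(struct mlx5_eq *eq)
{
        switch (eq->type) {
        case MLX5_EQ_TYPE_COMP:
                /* completion EQs drain their CQs via eq->tasklet_ctx */
                break;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        case MLX5_EQ_TYPE_PF:
                /* page-fault EQs defer processing to a dedicated workqueue */
                queue_work(eq->pf_ctx.wq, &eq->pf_ctx.work);
                break;
#endif
        default:
                break;
        }
}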
struct mlx5_core_psv {
@@ -355,13 +382,21 @@ struct mlx5_core_sig_ctx {
u32 sigerr_count;
};
+enum {
+ MLX5_MKEY_MR = 1,
+ MLX5_MKEY_MW,
+};
+
struct mlx5_core_mkey {
u64 iova;
u64 size;
u32 key;
u32 pd;
+ u32 type;
};
+#define MLX5_24BIT_MASK ((1 << 24) - 1)
+
enum mlx5_res_type {
MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
@@ -396,20 +431,47 @@ struct mlx5_eq_table {
struct mlx5_eq pages_eq;
struct mlx5_eq async_eq;
struct mlx5_eq cmd_eq;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ struct mlx5_eq pfault_eq;
+#endif
int num_comp_vectors;
/* protect EQs list
*/
spinlock_t lock;
};
-struct mlx5_uar {
- u32 index;
- struct list_head bf_list;
- unsigned free_bf_bmap;
- void __iomem *bf_map;
+struct mlx5_uars_page {
void __iomem *map;
+ bool wc;
+ u32 index;
+ struct list_head list;
+ unsigned int bfregs;
+ unsigned long *reg_bitmap; /* for non fast path bf regs */
+ unsigned long *fp_bitmap;
+ unsigned int reg_avail;
+ unsigned int fp_avail;
+ struct kref ref_count;
+ struct mlx5_core_dev *mdev;
};
+struct mlx5_bfreg_head {
+ /* protect blue flame registers allocations */
+ struct mutex lock;
+ struct list_head list;
+};
+
+struct mlx5_bfreg_data {
+ struct mlx5_bfreg_head reg_head;
+ struct mlx5_bfreg_head wc_head;
+};
+
+struct mlx5_sq_bfreg {
+ void __iomem *map;
+ struct mlx5_uars_page *up;
+ bool wc;
+ u32 index;
+ unsigned int offset;
+};
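
The old per-process UUAR bookkeeping gives way to reference-counted UAR pages: struct mlx5_uars_page tracks per-page bitmaps, struct mlx5_bfreg_data keeps two page lists (regular and write-combining), and struct mlx5_sq_bfreg is what an individual send queue ends up holding. A minimal sketch, using only the members declared above, of how the bfreg lists would be initialized at device setup (helper name and call site are assumptions):

/* Hedged sketch: set up the empty bfreg page lists and their locks. */
static void init_bfreg_lists(struct mlx5_bfreg_data *bfregs)
{
        mutex_init(&bfregs->reg_head.lock);
        INIT_LIST_HEAD(&bfregs->reg_head.list);
        mutex_init(&bfregs->wc_head.lock);
        INIT_LIST_HEAD(&bfregs->wc_head.list);
}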
struct mlx5_core_health {
struct health_buffer __iomem *health;
@@ -482,6 +544,7 @@ struct mlx5_fc_stats {
struct mlx5_eswitch;
struct mlx5_lag;
+struct mlx5_pagefault;
struct mlx5_rl_entry {
u32 rate;
@@ -498,13 +561,36 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
+enum port_module_event_status_type {
+ MLX5_MODULE_STATUS_PLUGGED = 0x1,
+ MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+ MLX5_MODULE_STATUS_ERROR = 0x3,
+ MLX5_MODULE_STATUS_NUM = 0x3,
+};
+
+enum port_module_event_error_type {
+ MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
+ MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
+ MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
+ MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
+ MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
+ MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
+ MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN,
+ MLX5_MODULE_EVENT_ERROR_NUM,
+};
+
+struct mlx5_port_module_event_stats {
+ u64 status_counters[MLX5_MODULE_STATUS_NUM];
+ u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+};
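
mlx5_port_module_event_stats keeps one counter per module status and per error type. A hedged sketch of how an event handler might bump them; note the status codes above are 1-based, so the counter index is status - 1 (handler name assumed):

/* Hedged sketch: account a port module event in the new counters. */
static void count_pme(struct mlx5_port_module_event_stats *pme,
                      u8 status, u8 error)
{
        if (status >= MLX5_MODULE_STATUS_PLUGGED &&
            status <= MLX5_MODULE_STATUS_NUM)
                pme->status_counters[status - 1]++;

        if (status == MLX5_MODULE_STATUS_ERROR &&
            error < MLX5_MODULE_EVENT_ERROR_NUM)
                pme->error_counters[error]++;
}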
+
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table eq_table;
struct msix_entry *msix_arr;
struct mlx5_irq_info *irq_info;
- struct mlx5_uuar_info uuari;
- MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
/* pages stuff */
struct workqueue_struct *pg_wq;
@@ -559,6 +645,18 @@ struct mlx5_priv {
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
+
+ struct mlx5_port_module_event_stats pme_stats;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ void (*pfault)(struct mlx5_core_dev *dev,
+ void *context,
+ struct mlx5_pagefault *pfault);
+ void *pfault_ctx;
+ struct srcu_struct pfault_srcu;
+#endif
+ struct mlx5_bfreg_data bfregs;
+ struct mlx5_uars_page *uar;
};
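
The page-fault callback stored in mlx5_priv is protected by pfault_srcu, so the EQ code can invoke it while a consumer may concurrently unregister. A sketch of the expected calling convention (function name assumed; the real dispatch sits in eq.c, outside this diff):

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Hedged sketch: deliver a page fault to the registered handler under SRCU. */
static void dispatch_pfault(struct mlx5_core_dev *dev,
                            struct mlx5_pagefault *pfault)
{
        int srcu_idx;

        srcu_idx = srcu_read_lock(&dev->priv.pfault_srcu);
        if (dev->priv.pfault)
                dev->priv.pfault(dev, dev->priv.pfault_ctx, pfault);
        srcu_read_unlock(&dev->priv.pfault_srcu, srcu_idx);
}
#endif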
enum mlx5_device_state {
@@ -577,13 +675,56 @@ enum mlx5_pci_status {
MLX5_PCI_STATUS_ENABLED,
};
+enum mlx5_pagefault_type_flags {
+ MLX5_PFAULT_REQUESTOR = 1 << 0,
+ MLX5_PFAULT_WRITE = 1 << 1,
+ MLX5_PFAULT_RDMA = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u32 token;
+ u8 event_subtype;
+ u8 type;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * Number of resource holding WQE, depends on type.
+ */
+ u32 wq_num;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size, minimal size page fault
+ * resolution required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+
+ struct mlx5_eq *eq;
+ struct work_struct work;
+};
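
struct mlx5_pagefault is what the core hands to the page-fault consumer; once the fault is resolved, the consumer reports back with mlx5_core_page_fault_resume(), declared further down in this header. A hedged sketch of the consumer side for a WQE fault (handler name and error policy are assumptions):

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Hedged sketch: resolve a WQE page fault, then tell firmware to resume
 * (or to complete the WQE in error if resolution failed). */
static void handle_wqe_pfault(struct mlx5_core_dev *dev,
                              struct mlx5_pagefault *pfault)
{
        int err = 0;

        /* ... fault in the pages behind WQE pfault->wqe.wqe_index on
         *     WQ pfault->wqe.wq_num (ODP machinery, not shown) ... */

        mlx5_core_page_fault_resume(dev, pfault->token, pfault->wqe.wq_num,
                                    pfault->type, err ? 1 : 0);
}
#endif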
+
struct mlx5_td {
struct list_head tirs_list;
u32 tdn;
};
struct mlx5e_resources {
- struct mlx5_uar cq_uar;
u32 pdn;
struct mlx5_td td;
struct mlx5_core_mkey mkey;
@@ -598,8 +739,12 @@ struct mlx5_core_dev {
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
- u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct {
+ u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
+ u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ } caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
@@ -773,11 +918,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
- bool map_wc);
-void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
@@ -787,6 +927,9 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+ struct mlx5_frag_buf *buf, int node);
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
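
mlx5_frag_buf_alloc_node()/mlx5_frag_buf_free() are the fragmented counterparts of mlx5_buf_alloc_node()/mlx5_buf_free(), so a large queue buffer no longer has to be one physically contiguous allocation. A hedged sketch of the call pattern (the surrounding queue object and error handling are assumptions):

/* Hedged sketch: allocate a fragmented buffer near 'node' and later free it. */
static int create_queue_buf(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf,
                            int size, int node)
{
        int err;

        err = mlx5_frag_buf_alloc_node(dev, size, buf, node);
        if (err)
                return err;

        /* ... hand the per-fragment page addresses to hardware, e.g. via
         *     mlx5_fill_page_frag_array() declared further down; the
         *     teardown path calls mlx5_frag_buf_free(dev, buf) ... */
        return 0;
}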
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -831,17 +974,16 @@ void mlx5_unregister_debugfs(void);
int mlx5_eq_init(struct mlx5_core_dev *dev);
void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-#endif
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name, struct mlx5_uar *uar);
+ int nent, u64 mask, const char *name,
+ enum mlx5_eq_type type);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
int mlx5_start_eqs(struct mlx5_core_dev *dev);
int mlx5_stop_eqs(struct mlx5_core_dev *dev);
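
mlx5_create_map_eq() now takes an enum mlx5_eq_type instead of a UAR pointer, which is what lets the driver bring up the dedicated page-fault EQ. A hedged sketch of such a call; the queue depth and event mask below are illustrative (MLX5_EVENT_TYPE_PAGE_FAULT comes from mlx5/device.h), and the real values are set in eq.c:

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
/* Hedged sketch: create a page-fault EQ on its reserved interrupt vector. */
static int create_pfault_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        return mlx5_create_map_eq(dev, eq, MLX5_EQ_VEC_PFAULT,
                                  256 /* illustrative depth */,
                                  1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
                                  "mlx5_page_fault_eq", MLX5_EQ_TYPE_PF);
}
#endif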
@@ -880,12 +1022,19 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
u8 port_num, void *out, size_t sz);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+ u32 wq_num, u8 type, int error);
+#endif
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+ bool map_wc, bool fast_path);
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
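
mlx5_alloc_bfreg()/mlx5_free_bfreg() replace the UUAR/UAR helpers removed earlier in this patch; a caller receives a self-contained struct mlx5_sq_bfreg. A hedged usage sketch (the function name and the choice of a write-combining, non-fast-path register are assumptions):

/* Hedged sketch: acquire a blue flame register for a send queue. */
static int sq_get_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
        int err;

        err = mlx5_alloc_bfreg(mdev, bfreg, true /* map_wc */, false /* fast_path */);
        if (err)
                return err;

        /* bfreg->index is what gets written into the queue context;
         * bfreg->map and bfreg->offset locate the doorbell area. */
        return 0;
}

/* Teardown is symmetric: mlx5_free_bfreg(mdev, bfreg); */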
static inline int fw_initializing(struct mlx5_core_dev *dev)
{
@@ -913,7 +1062,10 @@ enum {
};
enum {
- MAX_MR_CACHE_ENTRIES = 16,
+ MAX_UMR_CACHE_ENTRY = 20,
+ MLX5_IMR_MTT_CACHE_ENTRY,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MR_CACHE_ENTRIES
};
enum {
@@ -928,6 +1080,9 @@ struct mlx5_interface {
void (*detach)(struct mlx5_core_dev *dev, void *context);
void (*event)(struct mlx5_core_dev *dev, void *context,
enum mlx5_dev_event event, unsigned long param);
+ void (*pfault)(struct mlx5_core_dev *dev,
+ void *context,
+ struct mlx5_pagefault *pfault);
void * (*get_dev)(void *context);
int protocol;
struct list_head list;
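
mlx5_interface grows a pfault callback so an upper driver (the IB driver, for ODP) can receive page faults from the core. A hedged sketch of an interface that wires one up; registration goes through mlx5_register_interface(), which is part of this header but outside the hunks shown, and all names below are placeholders:

static void *my_add(struct mlx5_core_dev *dev)
{
        return dev;     /* placeholder context */
}

static void my_remove(struct mlx5_core_dev *dev, void *context)
{
}

static void my_pfault(struct mlx5_core_dev *dev, void *context,
                      struct mlx5_pagefault *pfault)
{
        /* resolve the fault, then call mlx5_core_page_fault_resume() */
}

static struct mlx5_interface my_intf = {
        .add      = my_add,
        .remove   = my_remove,
        .pfault   = my_pfault,
        .protocol = MLX5_INTERFACE_PROTOCOL_IB,
};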
@@ -942,6 +1097,8 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
struct mlx5_profile {
u64 mask;
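
mlx5_get_uars_page()/mlx5_put_uars_page() hand out a kernel-wide, reference-counted UARs page (the kref lives in struct mlx5_uars_page above). A hedged sketch of the get/put pairing; the error-return convention is an assumption, since it is defined in uar.c rather than in this header:

/* Hedged sketch: borrow the shared UARs page, use it, drop the reference. */
static int use_uars_page(struct mlx5_core_dev *mdev)
{
        struct mlx5_uars_page *up;

        up = mlx5_get_uars_page(mdev);
        if (!up)                /* error convention assumed */
                return -ENOMEM;

        /* ... program up->index into hardware contexts, access up->map ... */

        mlx5_put_uars_page(mdev, up);
        return 0;
}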