Diffstat (limited to 'drivers/infiniband/hw/mana/mana_ib.h')
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h  |  37
1 file changed, 37 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6903946677e5..5d31034ac7fb 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -60,6 +60,7 @@ struct mana_ib_adapter_caps {
u32 max_recv_sge_count;
u32 max_inline_data_size;
u64 feature_flags;
+ u64 page_size_cap;
};

struct mana_ib_queue {
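Sketch (illustrative only, not part of the patch above): a cached page_size_cap like this is typically surfaced to consumers through the standard query_device verb, copied into struct ib_device_attr. The adapter_caps member of struct mana_ib_dev and the function name below are assumptions for illustration.

/* Hypothetical illustration: report the cached capability via query_device. */
static int example_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	/* adapter_caps member name assumed from surrounding driver code */
	struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	props->page_size_cap = mdev->adapter_caps.page_size_cap;
	return 0;
}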
@@ -209,6 +210,7 @@ enum mana_ib_command_code {
MANA_IB_DESTROY_RC_QP = 0x3000b,
MANA_IB_SET_QP_STATE = 0x3000d,
MANA_IB_QUERY_VF_COUNTERS = 0x30022,
+ MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
};

struct mana_ib_query_adapter_caps_req {
@@ -217,6 +219,8 @@ struct mana_ib_query_adapter_caps_req {
enum mana_ib_adapter_features {
MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
+ MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
+ MANA_IB_FEATURE_MULTI_PORTS_SUPPORT = BIT(6),
};

struct mana_ib_query_adapter_caps_resp {
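Sketch (illustrative only): feature bits such as these are normally tested against the feature_flags value cached from the adapter-caps query before the corresponding mailbox requests are issued; the adapter_caps member name on struct mana_ib_dev is an assumption here.

/* Hypothetical helper: gate device-counter queries on the capability bit. */
static bool example_dev_counters_supported(struct mana_ib_dev *mdev)
{
	/* adapter_caps member name assumed */
	return mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT;
}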
@@ -513,6 +517,31 @@ struct mana_rnic_query_vf_cntrs_resp {
u64 rate_inc_events;
u64 num_qps_recovered;
u64 current_rate;
+ u64 dup_rx_req;
+ u64 tx_bytes;
+ u64 rx_bytes;
+ u64 rx_send_req;
+ u64 rx_write_req;
+ u64 rx_read_req;
+ u64 tx_pkt;
+ u64 rx_pkt;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_resp {
+ struct gdma_resp_hdr hdr;
+ u32 sent_cnps;
+ u32 received_ecns;
+ u32 reserved1;
+ u32 received_cnp_count;
+ u32 qp_congested_events;
+ u32 qp_recovered_events;
+ u32 rate_inc_events;
+ u32 reserved2;
}; /* HW Data */

static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
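Sketch (illustrative, not taken from the driver): the new request/response pair would be driven over the GDMA mailbox in the same shape as the existing per-VF counter query, using mana_gd_init_req_hdr() and mana_gd_send_request(); the adapter_handle member name is an assumption, and error handling is kept minimal.

/* Hypothetical example: issue MANA_IB_QUERY_DEVICE_COUNTERS and return the raw response. */
static int example_query_device_counters(struct mana_ib_dev *mdev,
					 struct mana_rnic_query_device_cntrs_resp *resp)
{
	struct mana_rnic_query_device_cntrs_req req = {};
	struct gdma_context *gc = mdev_to_gc(mdev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
			     sizeof(req), sizeof(*resp));
	req.hdr.dev_id = mdev->gdma_dev->dev_id;
	req.adapter = mdev->adapter_handle;	/* member name assumed */

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(*resp), resp);
	if (err) {
		ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d\n", err);
		return err;
	}

	return 0;
}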
@@ -543,6 +572,11 @@ static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
complete(&qp->free);
}

+static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
+}
+
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
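Sketch (illustrative only): the new helper lets shared setup code choose between the RNIC capability query and the Ethernet-only one, matching the two prototypes declared further down in this header.

/* Hypothetical wrapper around the two capability queries declared in this header. */
static int example_query_caps(struct mana_ib_dev *mdev)
{
	if (mana_ib_is_rnic(mdev))
		return mana_ib_gd_query_adapter_caps(mdev);

	return mana_eth_query_adapter_caps(mdev);
}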
@@ -599,6 +633,7 @@ struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags);
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);

int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
@@ -642,6 +677,7 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);

int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);

int mana_ib_create_eqs(struct mana_ib_dev *mdev);
@@ -687,5 +723,6 @@ int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
#endif
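Sketch (illustrative, assuming a provider that does not implement DMA handles): the struct ib_dmah * parameter added to the two MR registration prototypes above is commonly rejected when non-NULL; the body below is a placeholder, not the driver's registration path.

/* Hypothetical stub showing how the added dmah argument might be handled. */
struct ib_mr *example_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_dmah *dmah,
				  struct ib_udata *udata)
{
	if (dmah)
		return ERR_PTR(-EOPNOTSUPP);

	/* ... actual user-MR registration would go here ... */
	return ERR_PTR(-EOPNOTSUPP);
}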