Diffstat (limited to 'include/rdma')
-rw-r--r--  include/rdma/ib_cm.h            | 17
-rw-r--r--  include/rdma/ib_ucaps.h         | 30
-rw-r--r--  include/rdma/ib_umem_odp.h      | 25
-rw-r--r--  include/rdma/ib_verbs.h         | 55
-rw-r--r--  include/rdma/rdma_cm.h          |  1
-rw-r--r--  include/rdma/rdma_counter.h     |  7
-rw-r--r--  include/rdma/uverbs_std_types.h |  2
7 files changed, 87 insertions, 50 deletions
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index a2ac62b4a6cf..1fa3786f82f4 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -480,23 +480,12 @@ int ib_send_cm_rej(struct ib_cm_id *cm_id,
const void *private_data,
u8 private_data_len);
-#define IB_CM_MRA_FLAG_DELAY 0x80 /* Send MRA only after a duplicate msg */
-
/**
- * ib_send_cm_mra - Sends a message receipt acknowledgement to a connection
- * message.
+ * ib_prepare_cm_mra - Prepares to send a message receipt acknowledgment to a
+ *                     connection message in case duplicates are received.
* @cm_id: Connection identifier associated with the connection message.
- * @service_timeout: The lower 5-bits specify the maximum time required for
- * the sender to reply to the connection message. The upper 3-bits
- * specify additional control flags.
- * @private_data: Optional user-defined private data sent with the
- * message receipt acknowledgement.
- * @private_data_len: Size of the private data buffer, in bytes.
*/
-int ib_send_cm_mra(struct ib_cm_id *cm_id,
- u8 service_timeout,
- const void *private_data,
- u8 private_data_len);
+int ib_prepare_cm_mra(struct ib_cm_id *cm_id);
/**
* ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
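For reference, a hedged sketch of how a CM event handler might adapt to the new interface; the handler name is illustrative, not part of this patch:

/* Hedged sketch (not part of this patch): a hypothetical CM event
 * handler switching from ib_send_cm_mra() to ib_prepare_cm_mra().
 */
static int example_cm_req_handler(struct ib_cm_id *cm_id,
				  const struct ib_cm_event *event)
{
	int ret;

	/* Before: ib_send_cm_mra(cm_id, service_timeout, NULL, 0);
	 * Now the CM only sends an MRA if a duplicate connection
	 * message actually arrives while this request is still pending.
	 */
	ret = ib_prepare_cm_mra(cm_id);
	if (ret)
		return ret;

	/* ... long-running connection setup continues here ... */
	return 0;
}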
diff --git a/include/rdma/ib_ucaps.h b/include/rdma/ib_ucaps.h
new file mode 100644
index 000000000000..d9f96be3a553
--- /dev/null
+++ b/include/rdma/ib_ucaps.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+
+#ifndef _IB_UCAPS_H_
+#define _IB_UCAPS_H_
+
+#define UCAP_ENABLED(ucaps, type) (!!((ucaps) & (1U << (type))))
+
+enum rdma_user_cap {
+ RDMA_UCAP_MLX5_CTRL_LOCAL,
+ RDMA_UCAP_MLX5_CTRL_OTHER_VHCA,
+ RDMA_UCAP_MAX
+};
+
+void ib_cleanup_ucaps(void);
+int ib_get_ucaps(int *fds, int fd_count, uint64_t *idx_mask);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int ib_create_ucap(enum rdma_user_cap type);
+void ib_remove_ucap(enum rdma_user_cap type);
+#else
+static inline int ib_create_ucap(enum rdma_user_cap type)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ib_remove_ucap(enum rdma_user_cap type) {}
+#endif /* CONFIG_INFINIBAND_USER_ACCESS */
+
+#endif /* _IB_UCAPS_H_ */
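A hedged sketch of how a driver might plug into the new ucap interface; the example_* names are illustrative, while enabled_caps refers to the field added to struct ib_ucontext later in this diff:

/* Hedged sketch (not part of this patch): illustrative driver glue
 * for the new user-capability interface.
 */
#include <rdma/ib_ucaps.h>
#include <rdma/ib_verbs.h>

static int example_register_caps(void)
{
	/* Returns -EOPNOTSUPP when INFINIBAND_USER_ACCESS is disabled. */
	return ib_create_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
}

static void example_unregister_caps(void)
{
	ib_remove_ucap(RDMA_UCAP_MLX5_CTRL_LOCAL);
}

static bool example_ctx_has_local_ctrl(struct ib_ucontext *uctx)
{
	/* enabled_caps is the per-context bitmap added in ib_verbs.h */
	return UCAP_ENABLED(uctx->enabled_caps, RDMA_UCAP_MLX5_CTRL_LOCAL);
}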
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 0844c1d05ac6..2a24bf791c10 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -8,23 +8,17 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
+#include <linux/hmm-dma.h>
struct ib_umem_odp {
struct ib_umem umem;
struct mmu_interval_notifier notifier;
struct pid *tgid;
- /* An array of the pfns included in the on-demand paging umem. */
- unsigned long *pfn_list;
+ struct hmm_dma_map map;
/*
- * An array with DMA addresses mapped for pfns in pfn_list.
- * The lower two bits designate access permissions.
- * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT.
- */
- dma_addr_t *dma_list;
- /*
- * The umem_mutex protects the page_list and dma_list fields of an ODP
+ * The umem_mutex protects the page_list field of an ODP
* umem, allowing only a single thread to map/unmap pages. The mutex
* also protects access to the mmu notifier counters.
*/
@@ -67,19 +61,6 @@ static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
umem_odp->page_shift;
}
-/*
- * The lower 2 bits of the DMA address signal the R/W permissions for
- * the entry. To upgrade the permissions, provide the appropriate
- * bitmask to the map_dma_pages function.
- *
- * Be aware that upgrading a mapped address might result in change of
- * the DMA address for the page.
- */
-#define ODP_READ_ALLOWED_BIT (1<<0ULL)
-#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
-
-#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
-
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_umem_odp *
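For context, the removed macros encoded access permissions in the low two bits of each dma_list entry; a hedged sketch of that old encoding (the ex_* names are illustrative) documents what the hmm_dma_map conversion replaces:

/* Hedged sketch (not part of this patch): the encoding implemented by
 * the removed ODP_*_ALLOWED_BIT / ODP_DMA_ADDR_MASK macros.
 */
#define EX_ODP_READ_ALLOWED_BIT		(1 << 0)
#define EX_ODP_WRITE_ALLOWED_BIT	(1 << 1)
#define EX_ODP_DMA_ADDR_MASK \
	(~(dma_addr_t)(EX_ODP_READ_ALLOWED_BIT | EX_ODP_WRITE_ALLOWED_BIT))

static inline bool ex_odp_entry_writable(dma_addr_t entry)
{
	return entry & EX_ODP_WRITE_ALLOWED_BIT;
}

static inline dma_addr_t ex_odp_entry_addr(dma_addr_t entry)
{
	/* the permission bits must be masked off before DMA use */
	return entry & EX_ODP_DMA_ADDR_MASK;
}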
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0ad104dae253..af43a8d2a74a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -314,17 +314,19 @@ enum ib_atomic_cap {
};
enum ib_odp_general_cap_bits {
- IB_ODP_SUPPORT = 1 << 0,
- IB_ODP_SUPPORT_IMPLICIT = 1 << 1,
+ IB_ODP_SUPPORT = IB_UVERBS_ODP_SUPPORT,
+ IB_ODP_SUPPORT_IMPLICIT = IB_UVERBS_ODP_SUPPORT_IMPLICIT,
};
enum ib_odp_transport_cap_bits {
- IB_ODP_SUPPORT_SEND = 1 << 0,
- IB_ODP_SUPPORT_RECV = 1 << 1,
- IB_ODP_SUPPORT_WRITE = 1 << 2,
- IB_ODP_SUPPORT_READ = 1 << 3,
- IB_ODP_SUPPORT_ATOMIC = 1 << 4,
- IB_ODP_SUPPORT_SRQ_RECV = 1 << 5,
+ IB_ODP_SUPPORT_SEND = IB_UVERBS_ODP_SUPPORT_SEND,
+ IB_ODP_SUPPORT_RECV = IB_UVERBS_ODP_SUPPORT_RECV,
+ IB_ODP_SUPPORT_WRITE = IB_UVERBS_ODP_SUPPORT_WRITE,
+ IB_ODP_SUPPORT_READ = IB_UVERBS_ODP_SUPPORT_READ,
+ IB_ODP_SUPPORT_ATOMIC = IB_UVERBS_ODP_SUPPORT_ATOMIC,
+ IB_ODP_SUPPORT_SRQ_RECV = IB_UVERBS_ODP_SUPPORT_SRQ_RECV,
+ IB_ODP_SUPPORT_FLUSH = IB_UVERBS_ODP_SUPPORT_FLUSH,
+ IB_ODP_SUPPORT_ATOMIC_WRITE = IB_UVERBS_ODP_SUPPORT_ATOMIC_WRITE,
};
struct ib_odp_caps {
@@ -519,6 +521,23 @@ enum ib_port_state {
IB_PORT_ACTIVE_DEFER = 5
};
+static inline const char *__attribute_const__
+ib_port_state_to_str(enum ib_port_state state)
+{
+ const char * const states[] = {
+ [IB_PORT_NOP] = "NOP",
+ [IB_PORT_DOWN] = "DOWN",
+ [IB_PORT_INIT] = "INIT",
+ [IB_PORT_ARMED] = "ARMED",
+ [IB_PORT_ACTIVE] = "ACTIVE",
+ [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER",
+ };
+
+ if (state < ARRAY_SIZE(states))
+ return states[state];
+ return "UNKNOWN";
+}
+
enum ib_port_phys_state {
IB_PORT_PHYS_STATE_SLEEP = 1,
IB_PORT_PHYS_STATE_POLLING = 2,
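A hedged usage sketch for the new helper, assuming a port queried with ib_query_port(); the example_* name is illustrative:

/* Hedged sketch (not part of this patch): logging a port's state. */
static void example_log_port_state(struct ib_device *dev, u32 port)
{
	struct ib_port_attr attr;

	if (ib_query_port(dev, port, &attr))
		return;

	dev_info(&dev->dev, "port %u is %s\n", port,
		 ib_port_state_to_str(attr.state));
}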
@@ -1513,6 +1532,7 @@ struct ib_ucontext {
struct ib_uverbs_file *ufile;
struct ib_rdmacg_object cg_obj;
+ u64 enabled_caps;
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -2626,12 +2646,13 @@ struct ib_device_ops {
* @counter - The counter to be bound. If counter->id is zero then
* the driver needs to allocate a new counter and set counter->id
*/
- int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp);
+ int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp,
+ u32 port);
/**
* counter_unbind_qp - Unbind the qp from the dynamically-allocated
* counter and bind it onto the default one
*/
- int (*counter_unbind_qp)(struct ib_qp *qp);
+ int (*counter_unbind_qp)(struct ib_qp *qp, u32 port);
/**
* counter_dealloc -De-allocate the hw counter
*/
@@ -2648,6 +2669,11 @@ struct ib_device_ops {
int (*counter_update_stats)(struct rdma_counter *counter);
/**
+ * counter_init - Initialize the driver specific rdma counter struct.
+ */
+ void (*counter_init)(struct rdma_counter *counter);
+
+ /**
* Allows rdma drivers to add their own restrack attributes
* dumped via 'rdma stat' iproute2 command.
*/
@@ -2698,6 +2724,7 @@ struct ib_device_ops {
DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
+ DECLARE_RDMA_OBJ_SIZE(rdma_counter);
};
struct ib_core_device {
@@ -2750,6 +2777,7 @@ struct ib_device {
* It is a NULL terminated array.
*/
const struct attribute_group *groups[4];
+ u8 hw_stats_attr_index;
u64 uverbs_cmd_mask;
@@ -4764,7 +4792,14 @@ void roce_del_all_netdev_gids(struct ib_device *ib_dev,
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+#else
+static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
+{
+ return 0;
+}
+#endif
struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
enum rdma_netdev_t type, const char *name,
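A hedged sketch of how a driver might wire up the reworked counter callbacks and the new rdma_counter object size; everything prefixed example_ is illustrative, not from this patch:

/* Hedged sketch (not part of this patch): driver-side wiring for the
 * reworked counter ops.  The core now allocates struct rdma_counter,
 * so the driver embeds it at offset zero and reports its size.
 */
struct example_counter {
	struct rdma_counter rdma_counter;	/* must stay first */
	u32 hw_counter_id;
};

static int example_counter_bind_qp(struct rdma_counter *counter,
				   struct ib_qp *qp, u32 port)
{
	/* the port is now passed explicitly to the bind callback */
	return 0;
}

static int example_counter_unbind_qp(struct ib_qp *qp, u32 port)
{
	return 0;
}

static void example_counter_init(struct rdma_counter *counter)
{
	struct example_counter *ctr =
		container_of(counter, struct example_counter, rdma_counter);

	ctr->hw_counter_id = 0;
}

static const struct ib_device_ops example_dev_ops = {
	.counter_bind_qp = example_counter_bind_qp,
	.counter_unbind_qp = example_counter_unbind_qp,
	.counter_init = example_counter_init,
	INIT_RDMA_OBJ_SIZE(rdma_counter, example_counter, rdma_counter),
};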
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 8a8ab2f793ab..d1593ad47e28 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -388,6 +388,5 @@ void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
union ib_gid *dgid);
struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *cm_id);
-struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res);
#endif /* RDMA_CM_H */
diff --git a/include/rdma/rdma_counter.h b/include/rdma/rdma_counter.h
index 45d5481a7846..4204d08a010a 100644
--- a/include/rdma/rdma_counter.h
+++ b/include/rdma/rdma_counter.h
@@ -23,6 +23,7 @@ struct rdma_counter_mode {
enum rdma_nl_counter_mode mode;
enum rdma_nl_counter_mask mask;
struct auto_mode_param param;
+ bool bind_opcnt;
};
struct rdma_port_counter {
@@ -47,9 +48,10 @@ void rdma_counter_init(struct ib_device *dev);
void rdma_counter_release(struct ib_device *dev);
int rdma_counter_set_auto_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mask mask,
+ bool bind_opcnt,
struct netlink_ext_ack *extack);
int rdma_counter_bind_qp_auto(struct ib_qp *qp, u32 port);
-int rdma_counter_unbind_qp(struct ib_qp *qp, bool force);
+int rdma_counter_unbind_qp(struct ib_qp *qp, u32 port, bool force);
int rdma_counter_query_stats(struct rdma_counter *counter);
u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u32 port, u32 index);
@@ -61,7 +63,8 @@ int rdma_counter_unbind_qpn(struct ib_device *dev, u32 port,
u32 qp_num, u32 counter_id);
int rdma_counter_get_mode(struct ib_device *dev, u32 port,
enum rdma_nl_counter_mode *mode,
- enum rdma_nl_counter_mask *mask);
+ enum rdma_nl_counter_mask *mask,
+ bool *opcnt);
int rdma_counter_modify(struct ib_device *dev, u32 port,
unsigned int index, bool enable);
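A hedged sketch of a core-side caller adapting to the extended signatures; the example_* name is illustrative:

/* Hedged sketch (not part of this patch): core-side use of the extended
 * counter API.  Caller context and error handling are illustrative only.
 */
static int example_report_counter_mode(struct ib_device *dev, u32 port)
{
	enum rdma_nl_counter_mode mode;
	enum rdma_nl_counter_mask mask;
	bool opcnt;
	int ret;

	/* opcnt is the new out-parameter reporting whether optional
	 * counters are bound together with the QP (bind_opcnt).
	 */
	ret = rdma_counter_get_mode(dev, port, &mode, &mask, &opcnt);
	if (ret)
		return ret;

	pr_debug("port %u: mode %d mask 0x%x bind_opcnt %d\n",
		 port, mode, mask, opcnt);
	return 0;
}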
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index fe0512116958..555ea3d142a4 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -34,7 +34,7 @@
static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
{
if (IS_ERR(uobj))
- return NULL;
+ return ERR_CAST(uobj);
return uobj->object;
}
#define uobj_get_obj_read(_object, _type, _id, _attrs) \
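Because the helper now propagates the ERR_PTR instead of returning NULL, callers of uobj_get_obj_read() check with IS_ERR(); a hedged sketch, with the handler and qp_handle purely illustrative:

/* Hedged sketch (not part of this patch): caller-side check now that
 * _uobj_get_obj_read() propagates the ERR_PTR instead of returning NULL.
 */
static int example_handler(struct uverbs_attr_bundle *attrs, u32 qp_handle)
{
	struct ib_qp *qp;

	qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, qp_handle, attrs);
	if (IS_ERR(qp))			/* was: if (!qp) */
		return PTR_ERR(qp);

	/* ... use qp ... */

	uobj_put_obj_read(qp);
	return 0;
}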