author     Maxime Ripard <maxime@cerno.tech>    2020-11-02 13:17:54 +0300
committer  Maxime Ripard <maxime@cerno.tech>    2020-11-02 13:17:54 +0300
commit     c489573b5b6ce6442ad4658d9d5ec77839b91622 (patch)
tree       6db35e19cfb4c1280033a5545c94e15ac9c7b863 /include/rdma
parent     d23bce963e7f17738374d700f0dc5464c5f7cba2 (diff)
parent     3cea11cd5e3b00d91caf0b4730194039b45c5891 (diff)
download   linux-c489573b5b6ce6442ad4658d9d5ec77839b91622.tar.xz
Merge drm/drm-next into drm-misc-next
Daniel needs -rc2 in drm-misc-next to merge some patches
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'include/rdma')
-rw-r--r--   include/rdma/ib_cache.h     |   3
-rw-r--r--   include/rdma/ib_cm.h        |   3
-rw-r--r--   include/rdma/ib_umem.h      |  46
-rw-r--r--   include/rdma/ib_umem_odp.h  |  21
-rw-r--r--   include/rdma/ib_verbs.h     | 212
-rw-r--r--   include/rdma/rdma_cm.h      |  60
-rw-r--r--   include/rdma/restrack.h     |  21
7 files changed, 130 insertions, 236 deletions
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index 66a8f369a2fa..bae29f50adff 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -110,5 +110,8 @@ const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index); void rdma_put_gid_attr(const struct ib_gid_attr *attr); void rdma_hold_gid_attr(const struct ib_gid_attr *attr); +ssize_t rdma_query_gid_table(struct ib_device *device, + struct ib_uverbs_gid_entry *entries, + size_t max_entries); #endif /* _IB_CACHE_H */ diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 382427add677..e23eb357b761 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -14,9 +14,6 @@ #include <rdma/ib_sa.h> #include <rdma/rdma_cm.h> -/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */ -extern struct class cm_class; - enum ib_cm_state { IB_CM_IDLE, IB_CM_LISTEN, diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 71f573a418bf..70597508c765 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -17,6 +17,7 @@ struct ib_umem_odp; struct ib_umem { struct ib_device *ibdev; struct mm_struct *owning_mm; + u64 iova; size_t length; unsigned long address; u32 writable : 1; @@ -33,19 +34,46 @@ static inline int ib_umem_offset(struct ib_umem *umem) return umem->address & ~PAGE_MASK; } +static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem, + unsigned long pgsz) +{ + return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - + ALIGN_DOWN(umem->iova, pgsz))) / + pgsz; +} + static inline size_t ib_umem_num_pages(struct ib_umem *umem) { - return (ALIGN(umem->address + umem->length, PAGE_SIZE) - - ALIGN_DOWN(umem->address, PAGE_SIZE)) >> - PAGE_SHIFT; + return ib_umem_num_dma_blocks(umem, PAGE_SIZE); +} + +static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter, + struct ib_umem *umem, + unsigned long pgsz) +{ + __rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz); } +/** + * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem + * @umem: umem to iterate over + * @pgsz: Page size to split the list into + * + * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The + * returned DMA blocks will be aligned to pgsz and span the range: + * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz) + * + * Performs exactly ib_umem_num_dma_blocks() iterations. 
+ */ +#define rdma_umem_for_each_dma_block(umem, biter, pgsz) \ + for (__rdma_umem_block_iter_start(biter, umem, pgsz); \ + __rdma_block_iter_next(biter);) + #ifdef CONFIG_INFINIBAND_USER_MEM struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, size_t size, int access); void ib_umem_release(struct ib_umem *umem); -int ib_umem_page_count(struct ib_umem *umem); int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length); unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, @@ -63,15 +91,15 @@ static inline struct ib_umem *ib_umem_get(struct ib_device *device, return ERR_PTR(-EINVAL); } static inline void ib_umem_release(struct ib_umem *umem) { } -static inline int ib_umem_page_count(struct ib_umem *umem) { return 0; } static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, size_t length) { return -EINVAL; } -static inline int ib_umem_find_best_pgsz(struct ib_umem *umem, - unsigned long pgsz_bitmap, - unsigned long virt) { - return -EINVAL; +static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, + unsigned long pgsz_bitmap, + unsigned long virt) +{ + return 0; } #endif /* CONFIG_INFINIBAND_USER_MEM */ diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h index d16d2c17e733..0844c1d05ac6 100644 --- a/include/rdma/ib_umem_odp.h +++ b/include/rdma/ib_umem_odp.h @@ -14,17 +14,13 @@ struct ib_umem_odp { struct mmu_interval_notifier notifier; struct pid *tgid; + /* An array of the pfns included in the on-demand paging umem. */ + unsigned long *pfn_list; + /* - * An array of the pages included in the on-demand paging umem. - * Indices of pages that are currently not mapped into the device will - * contain NULL. - */ - struct page **page_list; - /* - * An array of the same size as page_list, with DMA addresses mapped - * for pages the pages in page_list. The lower two bits designate - * access permissions. See ODP_READ_ALLOWED_BIT and - * ODP_WRITE_ALLOWED_BIT. + * An array with DMA addresses mapped for pfns in pfn_list. + * The lower two bits designate access permissions. + * See ODP_READ_ALLOWED_BIT and ODP_WRITE_ALLOWED_BIT. 
*/ dma_addr_t *dma_list; /* @@ -97,9 +93,8 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr, const struct mmu_interval_notifier_ops *ops); void ib_umem_odp_release(struct ib_umem_odp *umem_odp); -int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, - u64 bcnt, u64 access_mask, - unsigned long current_seq); +int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset, + u64 bcnt, u64 access_mask, bool fault); void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset, u64 bound); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c0b2fa7e9b95..9bf6c319a670 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -138,10 +138,9 @@ union ib_gid { extern union ib_gid zgid; enum ib_gid_type { - /* If link layer is Ethernet, this is RoCE V1 */ - IB_GID_TYPE_IB = 0, - IB_GID_TYPE_ROCE = 0, - IB_GID_TYPE_ROCE_UDP_ENCAP = 1, + IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB, + IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1, + IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2, IB_GID_TYPE_SIZE }; @@ -180,7 +179,7 @@ rdma_node_get_transport(unsigned int node_type); enum rdma_network_type { RDMA_NETWORK_IB, - RDMA_NETWORK_ROCE_V1 = RDMA_NETWORK_IB, + RDMA_NETWORK_ROCE_V1, RDMA_NETWORK_IPV4, RDMA_NETWORK_IPV6 }; @@ -190,9 +189,10 @@ static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type net if (network_type == RDMA_NETWORK_IPV4 || network_type == RDMA_NETWORK_IPV6) return IB_GID_TYPE_ROCE_UDP_ENCAP; - - /* IB_GID_TYPE_IB same as RDMA_NETWORK_ROCE_V1 */ - return IB_GID_TYPE_IB; + else if (network_type == RDMA_NETWORK_ROCE_V1) + return IB_GID_TYPE_ROCE; + else + return IB_GID_TYPE_IB; } static inline enum rdma_network_type @@ -201,6 +201,9 @@ rdma_gid_attr_network_type(const struct ib_gid_attr *attr) if (attr->gid_type == IB_GID_TYPE_IB) return RDMA_NETWORK_IB; + if (attr->gid_type == IB_GID_TYPE_ROCE) + return RDMA_NETWORK_ROCE_V1; + if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid)) return RDMA_NETWORK_IPV4; else @@ -535,7 +538,8 @@ enum ib_port_speed { IB_SPEED_FDR10 = 8, IB_SPEED_FDR = 16, IB_SPEED_EDR = 32, - IB_SPEED_HDR = 64 + IB_SPEED_HDR = 64, + IB_SPEED_NDR = 128, }; /** @@ -669,7 +673,7 @@ struct ib_port_attr { u8 subnet_timeout; u8 init_type_reply; u8 active_width; - u8 active_speed; + u16 active_speed; u8 phys_state; u16 port_cap_flags2; }; @@ -952,13 +956,14 @@ enum ib_wc_status { const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); enum ib_wc_opcode { - IB_WC_SEND, - IB_WC_RDMA_WRITE, - IB_WC_RDMA_READ, - IB_WC_COMP_SWAP, - IB_WC_FETCH_ADD, - IB_WC_LSO, - IB_WC_LOCAL_INV, + IB_WC_SEND = IB_UVERBS_WC_SEND, + IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE, + IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ, + IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP, + IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD, + IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW, + IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV, + IB_WC_LSO = IB_UVERBS_WC_TSO, IB_WC_REG_MR, IB_WC_MASKED_COMP_SWAP, IB_WC_MASKED_FETCH_ADD, @@ -1291,6 +1296,7 @@ enum ib_wr_opcode { IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ, IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP, IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD, + IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW, IB_WR_LSO = IB_UVERBS_WR_TSO, IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV, IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV, @@ -1463,11 +1469,6 @@ enum rdma_remove_reason { RDMA_REMOVE_DRIVER_REMOVE, /* uobj is 
being cleaned-up before being committed */ RDMA_REMOVE_ABORT, - /* - * uobj has been fully created, with the uobj->object set, but is being - * cleaned up before being comitted - */ - RDMA_REMOVE_ABORT_HWOBJ, }; struct ib_rdmacg_object { @@ -1479,12 +1480,6 @@ struct ib_rdmacg_object { struct ib_ucontext { struct ib_device *device; struct ib_uverbs_file *ufile; - /* - * 'closing' can be read by the driver only during a destroy callback, - * it is set when we are closing the file descriptor and indicates - * that mm_sem may be locked. - */ - bool closing; bool cleanup_retryable; @@ -1863,17 +1858,6 @@ enum ib_flow_spec_type { #define IB_FLOW_SPEC_LAYER_MASK 0xF0 #define IB_FLOW_SPEC_SUPPORT_LAYERS 10 -/* Flow steering rule priority is set according to it's domain. - * Lower domain value means higher priority. - */ -enum ib_flow_domain { - IB_FLOW_DOMAIN_USER, - IB_FLOW_DOMAIN_ETHTOOL, - IB_FLOW_DOMAIN_RFS, - IB_FLOW_DOMAIN_NIC, - IB_FLOW_DOMAIN_NUM /* Must be last */ -}; - enum ib_flow_flags { IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */ IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */ @@ -2414,12 +2398,12 @@ struct ib_device_ops { void (*mmap_free)(struct rdma_user_mmap_entry *entry); void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); - void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); + int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr, struct ib_udata *udata); int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); - void (*destroy_ah)(struct ib_ah *ah, u32 flags); + int (*destroy_ah)(struct ib_ah *ah, u32 flags); int (*create_srq)(struct ib_srq *srq, struct ib_srq_init_attr *srq_init_attr, struct ib_udata *udata); @@ -2427,7 +2411,7 @@ struct ib_device_ops { enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata); int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr); - void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); + int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata); struct ib_qp *(*create_qp)(struct ib_pd *pd, struct ib_qp_init_attr *qp_init_attr, struct ib_udata *udata); @@ -2439,7 +2423,7 @@ struct ib_device_ops { int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); - void (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); + int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, @@ -2462,16 +2446,15 @@ struct ib_device_ops { unsigned int *sg_offset); int (*check_mr_status)(struct ib_mr *mr, u32 check_mask, struct ib_mr_status *mr_status); - struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type, - struct ib_udata *udata); + int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata); int (*dealloc_mw)(struct ib_mw *mw); int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); - void (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata); + int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata 
*udata); struct ib_flow *(*create_flow)(struct ib_qp *qp, struct ib_flow_attr *flow_attr, - int domain, struct ib_udata *udata); + struct ib_udata *udata); int (*destroy_flow)(struct ib_flow *flow_id); struct ib_flow_action *(*create_flow_action_esp)( struct ib_device *device, @@ -2496,13 +2479,12 @@ struct ib_device_ops { struct ib_wq *(*create_wq)(struct ib_pd *pd, struct ib_wq_init_attr *init_attr, struct ib_udata *udata); - void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); + int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata); int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr, u32 wq_attr_mask, struct ib_udata *udata); - struct ib_rwq_ind_table *(*create_rwq_ind_table)( - struct ib_device *device, - struct ib_rwq_ind_table_init_attr *init_attr, - struct ib_udata *udata); + int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table, + struct ib_rwq_ind_table_init_attr *init_attr, + struct ib_udata *udata); int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); struct ib_dm *(*alloc_dm)(struct ib_device *device, struct ib_ucontext *context, @@ -2514,7 +2496,7 @@ struct ib_device_ops { struct uverbs_attr_bundle *attrs); int (*create_counters)(struct ib_counters *counters, struct uverbs_attr_bundle *attrs); - void (*destroy_counters)(struct ib_counters *counters); + int (*destroy_counters)(struct ib_counters *counters); int (*read_counters)(struct ib_counters *counters, struct ib_counters_read_attr *counters_read_attr, struct uverbs_attr_bundle *attrs); @@ -2624,7 +2606,9 @@ struct ib_device_ops { DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_counters); DECLARE_RDMA_OBJ_SIZE(ib_cq); + DECLARE_RDMA_OBJ_SIZE(ib_mw); DECLARE_RDMA_OBJ_SIZE(ib_pd); + DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table); DECLARE_RDMA_OBJ_SIZE(ib_srq); DECLARE_RDMA_OBJ_SIZE(ib_ucontext); DECLARE_RDMA_OBJ_SIZE(ib_xrcd); @@ -2798,7 +2782,8 @@ void ib_dealloc_device(struct ib_device *device); void ib_get_device_fw_str(struct ib_device *device, char *str); -int ib_register_device(struct ib_device *device, const char *name); +int ib_register_device(struct ib_device *device, const char *name, + struct device *dma_device); void ib_unregister_device(struct ib_device *device); void ib_unregister_driver(enum rdma_driver_id driver_id); void ib_unregister_device_and_put(struct ib_device *device); @@ -3352,30 +3337,6 @@ static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num) } /** - * rdma_find_pg_bit - Find page bit given address and HW supported page sizes - * - * @addr: address - * @pgsz_bitmap: bitmap of HW supported page sizes - */ -static inline unsigned int rdma_find_pg_bit(unsigned long addr, - unsigned long pgsz_bitmap) -{ - unsigned long align; - unsigned long pgsz; - - align = addr & -addr; - - /* Find page bit such that addr is aligned to the highest supported - * HW page size - */ - pgsz = pgsz_bitmap & ~(-align << 1); - if (!pgsz) - return __ffs(pgsz_bitmap); - - return __fls(pgsz); -} - -/** * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not. 
* @device: Device * @port_num: 1 based Port number @@ -3472,12 +3433,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, #define ib_alloc_pd(device, flags) \ __ib_alloc_pd((device), (flags), KBUILD_MODNAME) -/** - * ib_dealloc_pd_user - Deallocate kernel/user PD - * @pd: The protection domain - * @udata: Valid user data or NULL for kernel objects - */ -void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); +int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); /** * ib_dealloc_pd - Deallocate kernel PD @@ -3487,7 +3443,9 @@ void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata); */ static inline void ib_dealloc_pd(struct ib_pd *pd) { - ib_dealloc_pd_user(pd, NULL); + int ret = ib_dealloc_pd_user(pd, NULL); + + WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail"); } enum rdma_create_ah_flags { @@ -3615,9 +3573,11 @@ int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata); * * NOTE: for user ah use rdma_destroy_ah_user with valid udata! */ -static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags) +static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags) { - return rdma_destroy_ah_user(ah, flags, NULL); + int ret = rdma_destroy_ah_user(ah, flags, NULL); + + WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail"); } struct ib_srq *ib_create_srq_user(struct ib_pd *pd, @@ -3671,9 +3631,11 @@ int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata); * * NOTE: for user srq use ib_destroy_srq_user with valid udata! */ -static inline int ib_destroy_srq(struct ib_srq *srq) +static inline void ib_destroy_srq(struct ib_srq *srq) { - return ib_destroy_srq_user(srq, NULL); + int ret = ib_destroy_srq_user(srq, NULL); + + WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail"); } /** @@ -3817,46 +3779,15 @@ static inline int ib_post_recv(struct ib_qp *qp, return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy); } -struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private, - int nr_cqe, int comp_vector, - enum ib_poll_context poll_ctx, - const char *caller, struct ib_udata *udata); - -/** - * ib_alloc_cq_user: Allocate kernel/user CQ - * @dev: The IB device - * @private: Private data attached to the CQE - * @nr_cqe: Number of CQEs in the CQ - * @comp_vector: Completion vector used for the IRQs - * @poll_ctx: Context used for polling the CQ - * @udata: Valid user data or NULL for kernel objects - */ -static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev, - void *private, int nr_cqe, - int comp_vector, - enum ib_poll_context poll_ctx, - struct ib_udata *udata) -{ - return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, - KBUILD_MODNAME, udata); -} - -/** - * ib_alloc_cq: Allocate kernel CQ - * @dev: The IB device - * @private: Private data attached to the CQE - * @nr_cqe: Number of CQEs in the CQ - * @comp_vector: Completion vector used for the IRQs - * @poll_ctx: Context used for polling the CQ - * - * NOTE: for user cq use ib_alloc_cq_user with valid udata! 
- */ +struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, + int comp_vector, enum ib_poll_context poll_ctx, + const char *caller); static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe, int comp_vector, enum ib_poll_context poll_ctx) { - return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx, - NULL); + return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx, + KBUILD_MODNAME); } struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private, @@ -3878,26 +3809,7 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, KBUILD_MODNAME); } -/** - * ib_free_cq_user - Free kernel/user CQ - * @cq: The CQ to free - * @udata: Valid user data or NULL for kernel objects - * - * NOTE: This function shouldn't be called on shared CQs. - */ -void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata); - -/** - * ib_free_cq - Free kernel CQ - * @cq: The CQ to free - * - * NOTE: for user cq use ib_free_cq_user with valid udata! - */ -static inline void ib_free_cq(struct ib_cq *cq) -{ - ib_free_cq_user(cq, NULL); -} - +void ib_free_cq(struct ib_cq *cq); int ib_process_cq_direct(struct ib_cq *cq, int budget); /** @@ -3955,7 +3867,9 @@ int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata); */ static inline void ib_destroy_cq(struct ib_cq *cq) { - ib_destroy_cq_user(cq, NULL); + int ret = ib_destroy_cq_user(cq, NULL); + + WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail"); } /** @@ -4379,10 +4293,9 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port); struct ib_wq *ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr); -int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); +int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata); int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr, u32 wq_attr_mask); -int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table); int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset, unsigned int page_size); @@ -4410,7 +4323,7 @@ void ib_drain_rq(struct ib_qp *qp); void ib_drain_sq(struct ib_qp *qp); void ib_drain_qp(struct ib_qp *qp); -int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width); +int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width); static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr) { @@ -4717,6 +4630,7 @@ bool rdma_dev_access_netns(const struct ib_device *device, const struct net *net); #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) +#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF) #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF) /** diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index cf5da2ae49bf..32a67af18415 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -110,11 +110,14 @@ struct rdma_cm_id { u8 port_num; }; -struct rdma_cm_id *__rdma_create_id(struct net *net, - rdma_cm_event_handler event_handler, - void *context, enum rdma_ucm_port_space ps, - enum ib_qp_type qp_type, - const char *caller); +struct rdma_cm_id * +__rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler, + void *context, enum rdma_ucm_port_space ps, + enum ib_qp_type qp_type, const char *caller); +struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler, + void *context, + enum rdma_ucm_port_space ps, + enum ib_qp_type qp_type); /** * rdma_create_id - Create an RDMA identifier. 
@@ -132,9 +135,9 @@ struct rdma_cm_id *__rdma_create_id(struct net *net, * The event handler callback serializes on the id's mutex and is * allowed to sleep. */ -#define rdma_create_id(net, event_handler, context, ps, qp_type) \ - __rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \ - KBUILD_MODNAME) +#define rdma_create_id(net, event_handler, context, ps, qp_type) \ + __rdma_create_kernel_id(net, event_handler, context, ps, qp_type, \ + KBUILD_MODNAME) /** * rdma_destroy_id - Destroys an RDMA identifier. @@ -224,19 +227,9 @@ void rdma_destroy_qp(struct rdma_cm_id *id); int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int *qp_attr_mask); -/** - * rdma_connect - Initiate an active connection request. - * @id: Connection identifier to connect. - * @conn_param: Connection information used for connected QPs. - * - * Users must have resolved a route for the rdma_cm_id to connect with - * by having called rdma_resolve_route before calling this routine. - * - * This call will either connect to a remote QP or obtain remote QP - * information for unconnected rdma_cm_id's. The actual operation is - * based on the rdma_cm_id's port space. - */ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); +int rdma_connect_locked(struct rdma_cm_id *id, + struct rdma_conn_param *conn_param); int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, struct rdma_ucm_ece *ece); @@ -250,29 +243,12 @@ int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, */ int rdma_listen(struct rdma_cm_id *id, int backlog); -int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, - const char *caller); - -int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, - const char *caller, struct rdma_ucm_ece *ece); +int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); -/** - * rdma_accept - Called to accept a connection request or response. - * @id: Connection identifier associated with the request. - * @conn_param: Information needed to establish the connection. This must be - * provided if accepting a connection request. If accepting a connection - * response, this parameter must be NULL. - * - * Typically, this routine is only called by the listener to accept a connection - * request. It must also be called on the active side of a connection if the - * user is performing their own QP transitions. - * - * In the case of error, a reject message is sent to the remote side and the - * state of the qp associated with the id is modified to error, such that any - * previously posted receive buffers would be flushed. 
- */ -#define rdma_accept(id, conn_param) \ - __rdma_accept((id), (conn_param), KBUILD_MODNAME) +void rdma_lock_handler(struct rdma_cm_id *id); +void rdma_unlock_handler(struct rdma_cm_id *id); +int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, + struct rdma_ucm_ece *ece); /** * rdma_notify - Notifies the RDMA CM of an asynchronous event that has diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index 7682d1bcf789..d3a1cc5be7bc 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -106,22 +106,11 @@ struct rdma_restrack_entry { int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type); - -void rdma_restrack_kadd(struct rdma_restrack_entry *res); -void rdma_restrack_uadd(struct rdma_restrack_entry *res); - -/** - * rdma_restrack_del() - delete object from the reource tracking database - * @res: resource entry - * @type: actual type of object to operate - */ -void rdma_restrack_del(struct rdma_restrack_entry *res); - /** * rdma_is_kernel_res() - check the owner of resource * @res: resource entry */ -static inline bool rdma_is_kernel_res(struct rdma_restrack_entry *res) +static inline bool rdma_is_kernel_res(const struct rdma_restrack_entry *res) { return !res->user; } @@ -138,14 +127,6 @@ int __must_check rdma_restrack_get(struct rdma_restrack_entry *res); */ int rdma_restrack_put(struct rdma_restrack_entry *res); -/** - * rdma_restrack_set_task() - set the task for this resource - * @res: resource entry - * @caller: kernel name, the current task will be used if the caller is NULL. - */ -void rdma_restrack_set_task(struct rdma_restrack_entry *res, - const char *caller); - /* * Helper functions for rdma drivers when filling out * nldev driver attributes. |
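
Note on the ib_umem.h changes: ib_umem_page_count() and rdma_find_pg_bit() are dropped in favour of ib_umem_num_dma_blocks() and the rdma_umem_for_each_dma_block() iterator documented in the new kernel-doc, and the !CONFIG_INFINIBAND_USER_MEM stub of ib_umem_find_best_pgsz() now returns 0 instead of -EINVAL. A minimal sketch of how a driver MR path might use the new interface; the function name, the "pas" array, and the SZ_* page-size bitmap are illustrative assumptions, not part of this patch:

        #include <linux/sizes.h>
        #include <rdma/ib_umem.h>
        #include <rdma/ib_verbs.h>

        /* Illustrative sketch: fill a HW page array from a umem using the new
         * DMA-block iterator.  "pas" is assumed to have room for
         * ib_umem_num_dma_blocks(umem, pgsz) entries.
         */
        static unsigned long my_fill_page_array(struct ib_umem *umem, u64 iova,
                                                dma_addr_t *pas)
        {
                struct ib_block_iter biter;
                unsigned long pgsz;
                size_t i = 0;

                /* Largest page size the (hypothetical) HW supports that can map
                 * this umem; 0 means no supported size fits.
                 */
                pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, iova);
                if (!pgsz)
                        return 0;

                /* Runs exactly ib_umem_num_dma_blocks(umem, pgsz) times, each
                 * block aligned to pgsz.
                 */
                rdma_umem_for_each_dma_block(umem, &biter, pgsz)
                        pas[i++] = rdma_block_iter_dma_address(&biter);

                return pgsz;
        }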
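
Note on the rdma_cm.h changes: rdma_accept() and rdma_accept_ece() are now exported directly (the __rdma_accept() wrappers are gone), rdma_lock_handler()/rdma_unlock_handler() are added, and rdma_connect_locked() is introduced for callers that already hold the id's handler lock, i.e. code running inside the CM event handler. A hedged sketch of a ULP event handler using the new entry points; the handler and its conn_param values are made up for illustration:

        #include <rdma/rdma_cm.h>

        static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
        {
                struct rdma_conn_param param = { .retry_count = 7 };

                switch (event->event) {
                case RDMA_CM_EVENT_ROUTE_RESOLVED:
                        /* The handler runs with the id's handler lock held, so
                         * the newly added locked variant must be used here.
                         */
                        return rdma_connect_locked(id, &param);
                case RDMA_CM_EVENT_CONNECT_REQUEST:
                        /* rdma_accept() is now a plain function rather than a
                         * macro around __rdma_accept(); calling it from the
                         * handler remains the expected pattern.
                         */
                        return rdma_accept(id, &param);
                default:
                        return 0;
                }
        }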
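
Note on the ib_verbs.h ops changes: several destroy callbacks (dealloc_pd, destroy_ah, destroy_srq, destroy_cq, destroy_wq, dealloc_xrcd, destroy_counters) change from void to int, and the kernel-facing wrappers such as ib_dealloc_pd() and ib_destroy_cq() now WARN_ONCE() if destroying a kernel-owned object fails. A rough sketch of what a driver callback looks like after the change; struct my_cq and its fields are hypothetical:

        #include <rdma/ib_verbs.h>

        struct my_cq {
                struct ib_cq ibcq;
                /* driver-private CQ state would live here */
                bool hw_busy;
        };

        /* Destroy callbacks may now report failure instead of returning void. */
        static int my_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
        {
                struct my_cq *cq = container_of(ibcq, struct my_cq, ibcq);

                if (cq->hw_busy)
                        return -EBUSY;  /* ib_destroy_cq() WARN_ONCEs for kernel CQs */

                /* ... release HW and driver resources for the CQ ... */
                return 0;
        }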