author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-29 01:57:10 +0300 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-29 01:57:10 +0300 |
commit | 5d24ae67a961c51beb255a28c9c417d9710247c2 | (patch) |
tree | c23c71b2f17f4502554c80b84be476e4c08f7160 | /drivers/infiniband/core |
parent | 938edb8a31b976c9a92eb0cd4ff481e93f76c1f1 | (diff) |
parent | f617e5ffe04fd46010b618c9eeadaa04588704c9 | (diff) |
download | linux-5d24ae67a961c51beb255a28c9c417d9710247c2.tar.xz | |
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This has been a fairly typical cycle, with the usual sorts of driver
updates. Several series continue to come through which improve and
modernize various parts of the core code, and we finally are starting
to get the uAPI command interface cleaned up.
- Various driver fixes for bnxt_re, cxgb3/4, hfi1, hns, i40iw, mlx4,
mlx5, qib, rxe, usnic
- Rework the entire syscall flow for uverbs to be able to run over
ioctl(). Finally getting past the historic bad choice to use
write() for command execution
- More functional coverage with the mlx5 'devx' user API
- Start of the HFI1 series for 'TID RDMA'
- SRQ support in the hns driver
- Support for new IBTA defined 2x lane widths
- A big series to consolidate all the driver function pointers into a
  big struct and have drivers provide a 'static const' version of the
  struct instead of open-coding the initialization (see the sketch
  after this list)
- New 'advise_mr' uAPI to control device caching/loading of page
tables
- Support for inline data in SRPT
- Modernize how umad uses the driver core and creates cdev's and
sysfs files
- First steps toward removing 'uobject' from the view of the drivers"
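
The function-pointer consolidation called out above shows up in the device.c hunks below as ib_set_device_ops() and a SET_DEVICE_OP() macro that copies only the callbacks a driver actually provides. Here is a minimal userspace sketch of that shape; the struct name, the three callbacks, and the demo driver are illustrative stand-ins, not the kernel's real ib_device_ops.

```c
/* Standalone sketch (not kernel code): drivers supply one 'static const'
 * ops table and the core merges it field by field, instead of dozens of
 * open-coded pointer assignments. All names here are illustrative. */
#include <stdio.h>

struct dev_ops {
	int (*query_device)(void);
	int (*create_qp)(void);
	int (*destroy_qp)(void);
};

struct device {
	struct dev_ops ops;
};

/* Copy a callback only if the driver provides one and nothing has set it
 * yet -- the same shape as the kernel's SET_DEVICE_OP() macro. */
#define SET_DEVICE_OP(ptr, name)                      \
	do {                                          \
		if (ops->name && !(ptr)->name)        \
			(ptr)->name = ops->name;      \
	} while (0)

static void set_device_ops(struct device *dev, const struct dev_ops *ops)
{
	struct dev_ops *dev_ops = &dev->ops;

	SET_DEVICE_OP(dev_ops, query_device);
	SET_DEVICE_OP(dev_ops, create_qp);
	SET_DEVICE_OP(dev_ops, destroy_qp);
}

/* A driver's contribution: one read-only table. */
static int demo_query_device(void) { return 0; }
static int demo_create_qp(void)    { return 1; }

static const struct dev_ops demo_driver_ops = {
	.query_device = demo_query_device,
	.create_qp    = demo_create_qp,
	/* destroy_qp intentionally left unset */
};

int main(void)
{
	struct device dev = { 0 };

	set_device_ops(&dev, &demo_driver_ops);
	printf("create_qp -> %d, destroy_qp set? %s\n",
	       dev.ops.create_qp(),
	       dev.ops.destroy_qp ? "yes" : "no");
	return 0;
}
```

The merge is field by field and keeps anything already set, so a driver contributes a single read-only structure and the core never has to know which subset of callbacks it implements.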
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (193 commits)
RDMA/srpt: Use kmem_cache_free() instead of kfree()
RDMA/mlx5: Signedness bug in UVERBS_HANDLER()
IB/uverbs: Signedness bug in UVERBS_HANDLER()
IB/mlx5: Allocate the per-port Q counter shared when DEVX is supported
IB/umad: Start using dev_groups of class
IB/umad: Use class_groups and let core create class file
IB/umad: Refactor code to use cdev_device_add()
IB/umad: Avoid destroying device while it is accessed
IB/umad: Simplify and avoid dynamic allocation of class
IB/mlx5: Fix wrong error unwind
IB/mlx4: Remove set but not used variable 'pd'
RDMA/iwcm: Don't copy past the end of dev_name() string
IB/mlx5: Fix long EEH recover time with NVMe offloads
IB/mlx5: Simplify netdev unbinding
IB/core: Move query port to ioctl
RDMA/nldev: Expose port_cap_flags2
IB/core: uverbs copy to struct or zero helper
IB/rxe: Reuse code which sets port state
IB/rxe: Make counters thread safe
IB/mlx5: Use the correct commands for UMEM and UCTX allocation
...
Diffstat (limited to 'drivers/infiniband/core')
38 files changed, 2672 insertions, 1847 deletions
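
One of the device.c changes in the diff below guards device teardown with a reference count and a completion: ib_device_get_by_index() succeeds only while the count is non-zero, ib_device_put() completes the unregistration when the last user drops its reference, and ib_unregister_device() waits for that before tearing the device down. Below is a rough, self-contained sketch of the same lifecycle, under the assumption that a pthread mutex/condvar pair can stand in for the kernel's refcount_t and struct completion; all names are illustrative.

```c
/* Standalone sketch (not kernel code) of the lookup-vs-unregister guard. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_device {
	int refcount;            /* kernel: refcount_t, starts at 1 */
	bool unreg_done;         /* kernel: struct completion */
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

/* Mirrors ib_device_get_by_index(): refuse new users once unregistration
 * has dropped the count to zero (refcount_inc_not_zero() in the kernel). */
static bool demo_device_get(struct demo_device *dev)
{
	bool ok;

	pthread_mutex_lock(&dev->lock);
	ok = dev->refcount > 0;
	if (ok)
		dev->refcount++;
	pthread_mutex_unlock(&dev->lock);
	return ok;
}

/* Mirrors ib_device_put(): the final put signals the waiter. */
static void demo_device_put(struct demo_device *dev)
{
	pthread_mutex_lock(&dev->lock);
	if (--dev->refcount == 0) {
		dev->unreg_done = true;          /* complete() */
		pthread_cond_signal(&dev->cond);
	}
	pthread_mutex_unlock(&dev->lock);
}

/* Mirrors ib_unregister_device(): drop the initial reference taken at
 * allocation time, then wait until every concurrent user is done. */
static void demo_device_unregister(struct demo_device *dev)
{
	demo_device_put(dev);
	pthread_mutex_lock(&dev->lock);
	while (!dev->unreg_done)                 /* wait_for_completion() */
		pthread_cond_wait(&dev->cond, &dev->lock);
	pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
	struct demo_device dev = {
		.refcount = 1,                   /* as set at allocation */
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};

	if (demo_device_get(&dev))               /* e.g. a netlink handler */
		demo_device_put(&dev);           /* handler finishes */
	demo_device_unregister(&dev);            /* now safe to tear down */
	printf("device unregistered after last user dropped its reference\n");
	return 0;
}
```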
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 867cee5e27b2..69dee36e0e89 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -38,4 +38,4 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \ uverbs_std_types_cq.o \ uverbs_std_types_flow_action.o uverbs_std_types_dm.o \ uverbs_std_types_mr.o uverbs_std_types_counters.o \ - uverbs_uapi.o + uverbs_uapi.o uverbs_std_types_device.o diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index 324ef85a13b6..f82b4260de42 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -137,13 +137,13 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh * err2: ib_free_send_mad(send_buf); err1: - rdma_destroy_ah(ah); + rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); } static void agent_send_handler(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc) { - rdma_destroy_ah(mad_send_wc->send_buf->ah); + rdma_destroy_ah(mad_send_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(mad_send_wc->send_buf); } diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 5b2fce4a7091..7b04590f307f 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -215,10 +215,6 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry) dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__, port_num, entry->attr.index, entry->attr.gid.raw); - if (rdma_cap_roce_gid_table(device, port_num) && - entry->state != GID_TABLE_ENTRY_INVALID) - device->del_gid(&entry->attr, &entry->context); - write_lock_irq(&table->rwlock); /* @@ -324,7 +320,7 @@ static int add_roce_gid(struct ib_gid_table_entry *entry) return -EINVAL; } if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) { - ret = attr->device->add_gid(attr, &entry->context); + ret = attr->device->ops.add_gid(attr, &entry->context); if (ret) { dev_err(&attr->device->dev, "%s GID add failed port=%d index=%d\n", @@ -364,6 +360,9 @@ static void del_gid(struct ib_device *ib_dev, u8 port, table->data_vec[ix] = NULL; write_unlock_irq(&table->rwlock); + if (rdma_cap_roce_gid_table(ib_dev, port)) + ib_dev->ops.del_gid(&entry->attr, &entry->context); + put_gid_entry_locked(entry); } @@ -548,8 +547,8 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port, unsigned long mask; int ret; - if (ib_dev->get_netdev) { - idev = ib_dev->get_netdev(ib_dev, port); + if (ib_dev->ops.get_netdev) { + idev = ib_dev->ops.get_netdev(ib_dev, port); if (idev && attr->ndev != idev) { union ib_gid default_gid; @@ -1296,9 +1295,9 @@ static int config_non_roce_gid_cache(struct ib_device *device, mutex_lock(&table->lock); for (i = 0; i < gid_tbl_len; ++i) { - if (!device->query_gid) + if (!device->ops.query_gid) continue; - ret = device->query_gid(device, port, i, &gid_attr.gid); + ret = device->ops.query_gid(device, port, i, &gid_attr.gid); if (ret) { dev_warn(&device->dev, "query_gid failed (%d) for index %d\n", ret, diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index edb2cb758be7..37980c7564c0 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -343,7 +343,7 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, ret = -ENODEV; goto out; } - ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr); + ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0); if (IS_ERR(ah)) { ret = PTR_ERR(ah); goto out; @@ -355,7 +355,7 @@ static int 
cm_alloc_msg(struct cm_id_private *cm_id_priv, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(m)) { - rdma_destroy_ah(ah); + rdma_destroy_ah(ah, 0); ret = PTR_ERR(m); goto out; } @@ -400,7 +400,7 @@ static int cm_create_response_msg_ah(struct cm_port *port, static void cm_free_msg(struct ib_mad_send_buf *msg) { if (msg->ah) - rdma_destroy_ah(msg->ah); + rdma_destroy_ah(msg->ah, 0); if (msg->context[0]) cm_deref_id(msg->context[0]); ib_free_send_mad(msg); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 15d5bb7bf6bb..63a7cc00bae0 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -494,7 +494,7 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv, id_priv->id.route.addr.dev_addr.transport = rdma_node_get_transport(cma_dev->device->node_type); list_add_tail(&id_priv->list, &cma_dev->id_list); - rdma_restrack_add(&id_priv->res); + rdma_restrack_kadd(&id_priv->res); } static void cma_attach_to_dev(struct rdma_id_private *id_priv, diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c index 8c2dfb3e294e..3ec2c415bb70 100644 --- a/drivers/infiniband/core/cma_configfs.c +++ b/drivers/infiniband/core/cma_configfs.c @@ -33,7 +33,10 @@ #include <linux/module.h> #include <linux/configfs.h> #include <rdma/ib_verbs.h> +#include <rdma/rdma_cm.h> + #include "core_priv.h" +#include "cma_priv.h" struct cma_device; diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h index 194cfe78c447..cf47c69436a7 100644 --- a/drivers/infiniband/core/cma_priv.h +++ b/drivers/infiniband/core/cma_priv.h @@ -94,4 +94,32 @@ struct rdma_id_private { */ struct rdma_restrack_entry res; }; + +#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) +int cma_configfs_init(void); +void cma_configfs_exit(void); +#else +static inline int cma_configfs_init(void) +{ + return 0; +} + +static inline void cma_configfs_exit(void) +{ +} +#endif + +void cma_ref_dev(struct cma_device *dev); +void cma_deref_dev(struct cma_device *dev); +typedef bool (*cma_device_filter)(struct ib_device *, void *); +struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, + void *cookie); +int cma_get_default_gid_type(struct cma_device *dev, unsigned int port); +int cma_set_default_gid_type(struct cma_device *dev, unsigned int port, + enum ib_gid_type default_gid_type); +int cma_get_default_roce_tos(struct cma_device *dev, unsigned int port); +int cma_set_default_roce_tos(struct cma_device *dev, unsigned int port, + u8 default_roce_tos); +struct ib_device *cma_get_ib_dev(struct cma_device *dev); + #endif /* _CMA_PRIV_H */ diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index bb9007a0cca7..3cd830d52967 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -54,35 +54,6 @@ struct pkey_index_qp_list { struct list_head qp_list; }; -#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) -int cma_configfs_init(void); -void cma_configfs_exit(void); -#else -static inline int cma_configfs_init(void) -{ - return 0; -} - -static inline void cma_configfs_exit(void) -{ -} -#endif -struct cma_device; -void cma_ref_dev(struct cma_device *cma_dev); -void cma_deref_dev(struct cma_device *cma_dev); -typedef bool (*cma_device_filter)(struct ib_device *, void *); -struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, - void *cookie); -int cma_get_default_gid_type(struct cma_device *cma_dev, - unsigned int port); -int 
cma_set_default_gid_type(struct cma_device *cma_dev, - unsigned int port, - enum ib_gid_type default_gid_type); -int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port); -int cma_set_default_roce_tos(struct cma_device *a_dev, unsigned int port, - u8 default_roce_tos); -struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev); - int ib_device_register_sysfs(struct ib_device *device, int (*port_callback)(struct ib_device *, u8, struct kobject *)); @@ -244,10 +215,10 @@ static inline int ib_security_modify_qp(struct ib_qp *qp, int qp_attr_mask, struct ib_udata *udata) { - return qp->device->modify_qp(qp->real_qp, - qp_attr, - qp_attr_mask, - udata); + return qp->device->ops.modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); } static inline int ib_create_qp_security(struct ib_qp *qp, @@ -296,6 +267,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, #endif struct ib_device *ib_device_get_by_index(u32 ifindex); +void ib_device_put(struct ib_device *device); /* RDMA device netlink */ void nldev_init(void); void nldev_exit(void); @@ -308,10 +280,10 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, { struct ib_qp *qp; - if (!dev->create_qp) + if (!dev->ops.create_qp) return ERR_PTR(-EOPNOTSUPP); - qp = dev->create_qp(pd, attr, udata); + qp = dev->ops.create_qp(pd, attr, udata); if (IS_ERR(qp)) return qp; @@ -325,7 +297,10 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev, */ if (attr->qp_type < IB_QPT_XRC_INI) { qp->res.type = RDMA_RESTRACK_QP; - rdma_restrack_add(&qp->res); + if (uobj) + rdma_restrack_uadd(&qp->res); + else + rdma_restrack_kadd(&qp->res); } else qp->res.valid = false; diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index b1e5365ddafa..d61e5e1427c2 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c @@ -145,7 +145,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, struct ib_cq *cq; int ret = -ENOMEM; - cq = dev->create_cq(dev, &cq_attr, NULL, NULL); + cq = dev->ops.create_cq(dev, &cq_attr, NULL, NULL); if (IS_ERR(cq)) return cq; @@ -162,7 +162,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, cq->res.type = RDMA_RESTRACK_CQ; rdma_restrack_set_task(&cq->res, caller); - rdma_restrack_add(&cq->res); + rdma_restrack_kadd(&cq->res); switch (cq->poll_ctx) { case IB_POLL_DIRECT: @@ -193,7 +193,7 @@ out_free_wc: kfree(cq->wc); rdma_restrack_del(&cq->res); out_destroy_cq: - cq->device->destroy_cq(cq); + cq->device->ops.destroy_cq(cq); return ERR_PTR(ret); } EXPORT_SYMBOL(__ib_alloc_cq); @@ -225,7 +225,7 @@ void ib_free_cq(struct ib_cq *cq) kfree(cq->wc); rdma_restrack_del(&cq->res); - ret = cq->device->destroy_cq(cq); + ret = cq->device->ops.destroy_cq(cq); WARN_ON_ONCE(ret); } EXPORT_SYMBOL(ib_free_cq); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 87eb4f2cdd7d..47ab34ee1a9d 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -96,7 +96,7 @@ static struct notifier_block ibdev_lsm_nb = { static int ib_device_check_mandatory(struct ib_device *device) { -#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x } +#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x } static const struct { size_t offset; char *name; @@ -122,7 +122,8 @@ static int ib_device_check_mandatory(struct ib_device *device) int i; for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) { - if (!*(void **) ((void *) device + 
mandatory_table[i].offset)) { + if (!*(void **) ((void *) &device->ops + + mandatory_table[i].offset)) { dev_warn(&device->dev, "Device is missing mandatory function %s\n", mandatory_table[i].name); @@ -145,7 +146,8 @@ static struct ib_device *__ib_device_get_by_index(u32 index) } /* - * Caller is responsible to return refrerence count by calling put_device() + * Caller must perform ib_device_put() to return the device reference count + * when ib_device_get_by_index() returns valid device pointer. */ struct ib_device *ib_device_get_by_index(u32 index) { @@ -153,13 +155,21 @@ struct ib_device *ib_device_get_by_index(u32 index) down_read(&lists_rwsem); device = __ib_device_get_by_index(index); - if (device) - get_device(&device->dev); - + if (device) { + /* Do not return a device if unregistration has started. */ + if (!refcount_inc_not_zero(&device->refcount)) + device = NULL; + } up_read(&lists_rwsem); return device; } +void ib_device_put(struct ib_device *device) +{ + if (refcount_dec_and_test(&device->refcount)) + complete(&device->unreg_completion); +} + static struct ib_device *__ib_device_get_by_name(const char *name) { struct ib_device *device; @@ -293,6 +303,8 @@ struct ib_device *ib_alloc_device(size_t size) rwlock_init(&device->client_data_lock); INIT_LIST_HEAD(&device->client_data_list); INIT_LIST_HEAD(&device->port_list); + refcount_set(&device->refcount, 1); + init_completion(&device->unreg_completion); return device; } @@ -362,8 +374,8 @@ static int read_port_immutable(struct ib_device *device) return -ENOMEM; for (port = start_port; port <= end_port; ++port) { - ret = device->get_port_immutable(device, port, - &device->port_immutable[port]); + ret = device->ops.get_port_immutable( + device, port, &device->port_immutable[port]); if (ret) return ret; @@ -375,8 +387,8 @@ static int read_port_immutable(struct ib_device *device) void ib_get_device_fw_str(struct ib_device *dev, char *str) { - if (dev->get_dev_fw_str) - dev->get_dev_fw_str(dev, str); + if (dev->ops.get_dev_fw_str) + dev->ops.get_dev_fw_str(dev, str); else str[0] = '\0'; } @@ -525,7 +537,7 @@ static int setup_device(struct ib_device *device) } memset(&device->attrs, 0, sizeof(device->attrs)); - ret = device->query_device(device, &device->attrs, &uhw); + ret = device->ops.query_device(device, &device->attrs, &uhw); if (ret) { dev_warn(&device->dev, "Couldn't query the device attributes\n"); @@ -641,6 +653,13 @@ void ib_unregister_device(struct ib_device *device) struct ib_client_data *context, *tmp; unsigned long flags; + /* + * Wait for all netlink command callers to finish working on the + * device. 
+ */ + ib_device_put(device); + wait_for_completion(&device->unreg_completion); + mutex_lock(&device_mutex); down_write(&lists_rwsem); @@ -905,14 +924,14 @@ int ib_query_port(struct ib_device *device, return -EINVAL; memset(port_attr, 0, sizeof(*port_attr)); - err = device->query_port(device, port_num, port_attr); + err = device->ops.query_port(device, port_num, port_attr); if (err || port_attr->subnet_prefix) return err; if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) return 0; - err = device->query_gid(device, port_num, 0, &gid); + err = device->ops.query_gid(device, port_num, 0, &gid); if (err) return err; @@ -946,8 +965,8 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev, if (rdma_protocol_roce(ib_dev, port)) { struct net_device *idev = NULL; - if (ib_dev->get_netdev) - idev = ib_dev->get_netdev(ib_dev, port); + if (ib_dev->ops.get_netdev) + idev = ib_dev->ops.get_netdev(ib_dev, port); if (idev && idev->reg_state >= NETREG_UNREGISTERED) { @@ -1024,7 +1043,10 @@ int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb, int ib_query_pkey(struct ib_device *device, u8 port_num, u16 index, u16 *pkey) { - return device->query_pkey(device, port_num, index, pkey); + if (!rdma_is_port_valid(device, port_num)) + return -EINVAL; + + return device->ops.query_pkey(device, port_num, index, pkey); } EXPORT_SYMBOL(ib_query_pkey); @@ -1041,11 +1063,11 @@ int ib_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) { - if (!device->modify_device) + if (!device->ops.modify_device) return -ENOSYS; - return device->modify_device(device, device_modify_mask, - device_modify); + return device->ops.modify_device(device, device_modify_mask, + device_modify); } EXPORT_SYMBOL(ib_modify_device); @@ -1069,9 +1091,10 @@ int ib_modify_port(struct ib_device *device, if (!rdma_is_port_valid(device, port_num)) return -EINVAL; - if (device->modify_port) - rc = device->modify_port(device, port_num, port_modify_mask, - port_modify); + if (device->ops.modify_port) + rc = device->ops.modify_port(device, port_num, + port_modify_mask, + port_modify); else rc = rdma_protocol_roce(device, port_num) ? 
0 : -ENOSYS; return rc; @@ -1198,6 +1221,105 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, } EXPORT_SYMBOL(ib_get_net_dev_by_params); +void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) +{ + struct ib_device_ops *dev_ops = &dev->ops; +#define SET_DEVICE_OP(ptr, name) \ + do { \ + if (ops->name) \ + if (!((ptr)->name)) \ + (ptr)->name = ops->name; \ + } while (0) + + SET_DEVICE_OP(dev_ops, add_gid); + SET_DEVICE_OP(dev_ops, alloc_dm); + SET_DEVICE_OP(dev_ops, alloc_fmr); + SET_DEVICE_OP(dev_ops, alloc_hw_stats); + SET_DEVICE_OP(dev_ops, alloc_mr); + SET_DEVICE_OP(dev_ops, alloc_mw); + SET_DEVICE_OP(dev_ops, alloc_pd); + SET_DEVICE_OP(dev_ops, alloc_rdma_netdev); + SET_DEVICE_OP(dev_ops, alloc_ucontext); + SET_DEVICE_OP(dev_ops, alloc_xrcd); + SET_DEVICE_OP(dev_ops, attach_mcast); + SET_DEVICE_OP(dev_ops, check_mr_status); + SET_DEVICE_OP(dev_ops, create_ah); + SET_DEVICE_OP(dev_ops, create_counters); + SET_DEVICE_OP(dev_ops, create_cq); + SET_DEVICE_OP(dev_ops, create_flow); + SET_DEVICE_OP(dev_ops, create_flow_action_esp); + SET_DEVICE_OP(dev_ops, create_qp); + SET_DEVICE_OP(dev_ops, create_rwq_ind_table); + SET_DEVICE_OP(dev_ops, create_srq); + SET_DEVICE_OP(dev_ops, create_wq); + SET_DEVICE_OP(dev_ops, dealloc_dm); + SET_DEVICE_OP(dev_ops, dealloc_fmr); + SET_DEVICE_OP(dev_ops, dealloc_mw); + SET_DEVICE_OP(dev_ops, dealloc_pd); + SET_DEVICE_OP(dev_ops, dealloc_ucontext); + SET_DEVICE_OP(dev_ops, dealloc_xrcd); + SET_DEVICE_OP(dev_ops, del_gid); + SET_DEVICE_OP(dev_ops, dereg_mr); + SET_DEVICE_OP(dev_ops, destroy_ah); + SET_DEVICE_OP(dev_ops, destroy_counters); + SET_DEVICE_OP(dev_ops, destroy_cq); + SET_DEVICE_OP(dev_ops, destroy_flow); + SET_DEVICE_OP(dev_ops, destroy_flow_action); + SET_DEVICE_OP(dev_ops, destroy_qp); + SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table); + SET_DEVICE_OP(dev_ops, destroy_srq); + SET_DEVICE_OP(dev_ops, destroy_wq); + SET_DEVICE_OP(dev_ops, detach_mcast); + SET_DEVICE_OP(dev_ops, disassociate_ucontext); + SET_DEVICE_OP(dev_ops, drain_rq); + SET_DEVICE_OP(dev_ops, drain_sq); + SET_DEVICE_OP(dev_ops, get_dev_fw_str); + SET_DEVICE_OP(dev_ops, get_dma_mr); + SET_DEVICE_OP(dev_ops, get_hw_stats); + SET_DEVICE_OP(dev_ops, get_link_layer); + SET_DEVICE_OP(dev_ops, get_netdev); + SET_DEVICE_OP(dev_ops, get_port_immutable); + SET_DEVICE_OP(dev_ops, get_vector_affinity); + SET_DEVICE_OP(dev_ops, get_vf_config); + SET_DEVICE_OP(dev_ops, get_vf_stats); + SET_DEVICE_OP(dev_ops, map_mr_sg); + SET_DEVICE_OP(dev_ops, map_phys_fmr); + SET_DEVICE_OP(dev_ops, mmap); + SET_DEVICE_OP(dev_ops, modify_ah); + SET_DEVICE_OP(dev_ops, modify_cq); + SET_DEVICE_OP(dev_ops, modify_device); + SET_DEVICE_OP(dev_ops, modify_flow_action_esp); + SET_DEVICE_OP(dev_ops, modify_port); + SET_DEVICE_OP(dev_ops, modify_qp); + SET_DEVICE_OP(dev_ops, modify_srq); + SET_DEVICE_OP(dev_ops, modify_wq); + SET_DEVICE_OP(dev_ops, peek_cq); + SET_DEVICE_OP(dev_ops, poll_cq); + SET_DEVICE_OP(dev_ops, post_recv); + SET_DEVICE_OP(dev_ops, post_send); + SET_DEVICE_OP(dev_ops, post_srq_recv); + SET_DEVICE_OP(dev_ops, process_mad); + SET_DEVICE_OP(dev_ops, query_ah); + SET_DEVICE_OP(dev_ops, query_device); + SET_DEVICE_OP(dev_ops, query_gid); + SET_DEVICE_OP(dev_ops, query_pkey); + SET_DEVICE_OP(dev_ops, query_port); + SET_DEVICE_OP(dev_ops, query_qp); + SET_DEVICE_OP(dev_ops, query_srq); + SET_DEVICE_OP(dev_ops, rdma_netdev_get_params); + SET_DEVICE_OP(dev_ops, read_counters); + SET_DEVICE_OP(dev_ops, reg_dm_mr); + SET_DEVICE_OP(dev_ops, reg_user_mr); + 
SET_DEVICE_OP(dev_ops, req_ncomp_notif); + SET_DEVICE_OP(dev_ops, req_notify_cq); + SET_DEVICE_OP(dev_ops, rereg_user_mr); + SET_DEVICE_OP(dev_ops, resize_cq); + SET_DEVICE_OP(dev_ops, set_vf_guid); + SET_DEVICE_OP(dev_ops, set_vf_link_state); + SET_DEVICE_OP(dev_ops, unmap_fmr); +} +EXPORT_SYMBOL(ib_set_device_ops); + static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { [RDMA_NL_LS_OP_RESOLVE] = { .doit = ib_nl_handle_resolve_resp, diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 83ba0068e8bb..7d841b689a1e 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c @@ -211,8 +211,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, return ERR_PTR(-EINVAL); device = pd->device; - if (!device->alloc_fmr || !device->dealloc_fmr || - !device->map_phys_fmr || !device->unmap_fmr) { + if (!device->ops.alloc_fmr || !device->ops.dealloc_fmr || + !device->ops.map_phys_fmr || !device->ops.unmap_fmr) { dev_info(&device->dev, "Device does not support FMRs\n"); return ERR_PTR(-ENOSYS); } @@ -474,7 +474,7 @@ EXPORT_SYMBOL(ib_fmr_pool_map_phys); * Unmap an FMR. The FMR mapping may remain valid until the FMR is * reused (or until ib_flush_fmr_pool() is called). */ -int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) +void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) { struct ib_fmr_pool *pool; unsigned long flags; @@ -503,7 +503,5 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr) #endif spin_unlock_irqrestore(&pool->pool_lock, flags); - - return 0; } EXPORT_SYMBOL(ib_fmr_pool_unmap); diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index ba668d49c751..476abc74178e 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -502,17 +502,21 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr, */ static int iw_cm_map(struct iw_cm_id *cm_id, bool active) { + const char *devname = dev_name(&cm_id->device->dev); + const char *ifname = cm_id->device->iwcm->ifname; struct iwpm_dev_data pm_reg_msg; struct iwpm_sa_data pm_msg; int status; + if (strlen(devname) >= sizeof(pm_reg_msg.dev_name) || + strlen(ifname) >= sizeof(pm_reg_msg.if_name)) + return -EINVAL; + cm_id->m_local_addr = cm_id->local_addr; cm_id->m_remote_addr = cm_id->remote_addr; - memcpy(pm_reg_msg.dev_name, dev_name(&cm_id->device->dev), - sizeof(pm_reg_msg.dev_name)); - memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname, - sizeof(pm_reg_msg.if_name)); + strncpy(pm_reg_msg.dev_name, devname, sizeof(pm_reg_msg.dev_name)); + strncpy(pm_reg_msg.if_name, ifname, sizeof(pm_reg_msg.if_name)); if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) || !iwpm_valid_pid()) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index d7025cd5be28..7870823bac47 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -888,10 +888,10 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, } /* No GRH for DR SMP */ - ret = device->process_mad(device, 0, port_num, &mad_wc, NULL, - (const struct ib_mad_hdr *)smp, mad_size, - (struct ib_mad_hdr *)mad_priv->mad, - &mad_size, &out_mad_pkey_index); + ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL, + (const struct ib_mad_hdr *)smp, mad_size, + (struct ib_mad_hdr *)mad_priv->mad, + &mad_size, &out_mad_pkey_index); switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: @@ -2305,14 +2305,12 @@ static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc) } /* Give 
driver "right of first refusal" on incoming MAD */ - if (port_priv->device->process_mad) { - ret = port_priv->device->process_mad(port_priv->device, 0, - port_priv->port_num, - wc, &recv->grh, - (const struct ib_mad_hdr *)recv->mad, - recv->mad_size, - (struct ib_mad_hdr *)response->mad, - &mad_size, &resp_mad_pkey_index); + if (port_priv->device->ops.process_mad) { + ret = port_priv->device->ops.process_mad( + port_priv->device, 0, port_priv->port_num, wc, + &recv->grh, (const struct ib_mad_hdr *)recv->mad, + recv->mad_size, (struct ib_mad_hdr *)response->mad, + &mad_size, &resp_mad_pkey_index); if (opa) wc->pkey_index = resp_mad_pkey_index; diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index e5cf09c66fe6..5ec57abc0849 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c @@ -81,7 +81,7 @@ static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv) { deref_rmpp_recv(rmpp_recv); wait_for_completion(&rmpp_recv->comp); - rdma_destroy_ah(rmpp_recv->ah); + rdma_destroy_ah(rmpp_recv->ah, RDMA_DESTROY_AH_SLEEPABLE); kfree(rmpp_recv); } @@ -171,7 +171,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent, hdr_len, 0, GFP_KERNEL, IB_MGMT_BASE_VERSION); if (IS_ERR(msg)) - rdma_destroy_ah(ah); + rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); else { msg->ah = ah; msg->context[0] = ah; @@ -201,7 +201,7 @@ static void ack_ds_ack(struct ib_mad_agent_private *agent, ret = ib_post_send_mad(msg, NULL); if (ret) { - rdma_destroy_ah(msg->ah); + rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(msg); } } @@ -209,7 +209,8 @@ static void ack_ds_ack(struct ib_mad_agent_private *agent, void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) { if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah) - rdma_destroy_ah(mad_send_wc->send_buf->ah); + rdma_destroy_ah(mad_send_wc->send_buf->ah, + RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(mad_send_wc->send_buf); } @@ -237,7 +238,7 @@ static void nack_recv(struct ib_mad_agent_private *agent, ret = ib_post_send_mad(msg, NULL); if (ret) { - rdma_destroy_ah(msg->ah); + rdma_destroy_ah(msg->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(msg); } } diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 573399e3ccc1..e600fc23ae62 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -227,6 +227,7 @@ static int fill_port_info(struct sk_buff *msg, struct net_device *netdev = NULL; struct ib_port_attr attr; int ret; + u64 cap_flags = 0; if (fill_nldev_handle(msg, device)) return -EMSGSIZE; @@ -239,10 +240,12 @@ static int fill_port_info(struct sk_buff *msg, return ret; if (rdma_protocol_ib(device, port)) { - BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64)); + BUILD_BUG_ON((sizeof(attr.port_cap_flags) + + sizeof(attr.port_cap_flags2)) > sizeof(u64)); + cap_flags = attr.port_cap_flags | + ((u64)attr.port_cap_flags2 << 32); if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, - (u64)attr.port_cap_flags, - RDMA_NLDEV_ATTR_PAD)) + cap_flags, RDMA_NLDEV_ATTR_PAD)) return -EMSGSIZE; if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, attr.subnet_prefix, RDMA_NLDEV_ATTR_PAD)) @@ -259,8 +262,8 @@ static int fill_port_info(struct sk_buff *msg, if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) return -EMSGSIZE; - if (device->get_netdev) - netdev = device->get_netdev(device, port); + if (device->ops.get_netdev) + netdev = device->ops.get_netdev(device, 
port); if (netdev && net_eq(dev_net(netdev), net)) { ret = nla_put_u32(msg, @@ -308,6 +311,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device) [RDMA_RESTRACK_QP] = "qp", [RDMA_RESTRACK_CM_ID] = "cm_id", [RDMA_RESTRACK_MR] = "mr", + [RDMA_RESTRACK_CTX] = "ctx", }; struct rdma_restrack_root *res = &device->res; @@ -636,13 +640,13 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, nlmsg_end(msg, nlh); - put_device(&device->dev); + ib_device_put(device); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: - put_device(&device->dev); + ib_device_put(device); return err; } @@ -672,7 +676,7 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, err = ib_device_rename(device, name); } - put_device(&device->dev); + ib_device_put(device); return err; } @@ -756,14 +760,14 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, goto err_free; nlmsg_end(msg, nlh); - put_device(&device->dev); + ib_device_put(device); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: - put_device(&device->dev); + ib_device_put(device); return err; } @@ -820,7 +824,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, } out: - put_device(&device->dev); + ib_device_put(device); cb->args[0] = idx; return skb->len; } @@ -859,13 +863,13 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, goto err_free; nlmsg_end(msg, nlh); - put_device(&device->dev); + ib_device_put(device); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); err_free: nlmsg_free(msg); err: - put_device(&device->dev); + ib_device_put(device); return ret; } @@ -1058,7 +1062,7 @@ next: idx++; if (!filled) goto err; - put_device(&device->dev); + ib_device_put(device); return skb->len; res_err: @@ -1069,7 +1073,7 @@ err: nlmsg_cancel(skb, nlh); err_index: - put_device(&device->dev); + ib_device_put(device); return ret; } diff --git a/drivers/infiniband/core/opa_smi.h b/drivers/infiniband/core/opa_smi.h index 3bfab3505a29..af4879bdf3d6 100644 --- a/drivers/infiniband/core/opa_smi.h +++ b/drivers/infiniband/core/opa_smi.h @@ -55,7 +55,7 @@ static inline enum smi_action opa_smi_check_local_smp(struct opa_smp *smp, { /* C14-9:3 -- We're at the end of the DR segment of path */ /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */ - return (device->process_mad && + return (device->ops.process_mad && !opa_get_smp_direction(smp) && (smp->hop_ptr == smp->hop_cnt + 1)) ? IB_SMI_HANDLE : IB_SMI_DISCARD; @@ -70,7 +70,7 @@ static inline enum smi_action opa_smi_check_local_returning_smp(struct opa_smp * { /* C14-13:3 -- We're at the end of the DR segment of path */ /* C14-13:4 -- Hop Pointer == 0 -> give to SM */ - return (device->process_mad && + return (device->ops.process_mad && opa_get_smp_direction(smp) && !smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD; } diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c index 752a55c6bdce..6c4747e61d2b 100644 --- a/drivers/infiniband/core/rdma_core.c +++ b/drivers/infiniband/core/rdma_core.c @@ -224,12 +224,14 @@ out_unlock: * uverbs_put_destroy. 
*/ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, - u32 id, struct ib_uverbs_file *ufile) + u32 id, + const struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj; int ret; - uobj = rdma_lookup_get_uobject(obj, ufile, id, UVERBS_LOOKUP_DESTROY); + uobj = rdma_lookup_get_uobject(obj, attrs->ufile, id, + UVERBS_LOOKUP_DESTROY); if (IS_ERR(uobj)) return uobj; @@ -243,21 +245,20 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, } /* - * Does both uobj_get_destroy() and uobj_put_destroy(). Returns success_res - * on success (negative errno on failure). For use by callers that do not need - * the uobj. + * Does both uobj_get_destroy() and uobj_put_destroy(). Returns 0 on success + * (negative errno on failure). For use by callers that do not need the uobj. */ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id, - struct ib_uverbs_file *ufile, int success_res) + const struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj; - uobj = __uobj_get_destroy(obj, id, ufile); + uobj = __uobj_get_destroy(obj, id, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); - return success_res; + return 0; } /* alloc_uobj must be undone by uverbs_destroy_uobject() */ @@ -267,7 +268,7 @@ static struct ib_uobject *alloc_uobj(struct ib_uverbs_file *ufile, struct ib_uobject *uobj; struct ib_ucontext *ucontext; - ucontext = ib_uverbs_get_ucontext(ufile); + ucontext = ib_uverbs_get_ucontext_file(ufile); if (IS_ERR(ucontext)) return ERR_CAST(ucontext); @@ -397,16 +398,23 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj, struct ib_uobject *uobj; int ret; - if (!obj) - return ERR_PTR(-EINVAL); + if (IS_ERR(obj) && PTR_ERR(obj) == -ENOMSG) { + /* must be UVERBS_IDR_ANY_OBJECT, see uapi_get_object() */ + uobj = lookup_get_idr_uobject(NULL, ufile, id, mode); + if (IS_ERR(uobj)) + return uobj; + } else { + if (IS_ERR(obj)) + return ERR_PTR(-EINVAL); - uobj = obj->type_class->lookup_get(obj, ufile, id, mode); - if (IS_ERR(uobj)) - return uobj; + uobj = obj->type_class->lookup_get(obj, ufile, id, mode); + if (IS_ERR(uobj)) + return uobj; - if (uobj->uapi_object != obj) { - ret = -EINVAL; - goto free; + if (uobj->uapi_object != obj) { + ret = -EINVAL; + goto free; + } } /* @@ -426,7 +434,7 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj, return uobj; free: - obj->type_class->lookup_put(uobj, mode); + uobj->uapi_object->type_class->lookup_put(uobj, mode); uverbs_uobject_put(uobj); return ERR_PTR(ret); } @@ -490,7 +498,7 @@ struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj, { struct ib_uobject *ret; - if (!obj) + if (IS_ERR(obj)) return ERR_PTR(-EINVAL); /* @@ -812,18 +820,20 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile, */ if (reason == RDMA_REMOVE_DRIVER_REMOVE) { uverbs_user_mmap_disassociate(ufile); - if (ib_dev->disassociate_ucontext) - ib_dev->disassociate_ucontext(ucontext); + if (ib_dev->ops.disassociate_ucontext) + ib_dev->ops.disassociate_ucontext(ucontext); } ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); + rdma_restrack_del(&ucontext->res); + /* * FIXME: Drivers are not permitted to fail dealloc_ucontext, remove * the error return. 
*/ - ret = ib_dev->dealloc_ucontext(ucontext); + ret = ib_dev->ops.dealloc_ucontext(ucontext); WARN_ON(ret); ufile->ucontext = NULL; diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h index 4886d2bba7c7..be6b8e1257d0 100644 --- a/drivers/infiniband/core/rdma_core.h +++ b/drivers/infiniband/core/rdma_core.h @@ -118,43 +118,67 @@ void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); * Depending on ID the slot pointer in the radix tree points at one of these * structs. */ -struct uverbs_api_object { - const struct uverbs_obj_type *type_attrs; - const struct uverbs_obj_type_class *type_class; -}; struct uverbs_api_ioctl_method { - int (__rcu *handler)(struct ib_uverbs_file *ufile, - struct uverbs_attr_bundle *ctx); + int(__rcu *handler)(struct uverbs_attr_bundle *attrs); DECLARE_BITMAP(attr_mandatory, UVERBS_API_ATTR_BKEY_LEN); u16 bundle_size; u8 use_stack:1; u8 driver_method:1; + u8 disabled:1; + u8 has_udata:1; u8 key_bitmap_len; u8 destroy_bkey; }; +struct uverbs_api_write_method { + int (*handler)(struct uverbs_attr_bundle *attrs); + u8 disabled:1; + u8 is_ex:1; + u8 has_udata:1; + u8 has_resp:1; + u8 req_size; + u8 resp_size; +}; + struct uverbs_api_attr { struct uverbs_attr_spec spec; }; -struct uverbs_api_object; struct uverbs_api { /* radix tree contains struct uverbs_api_* pointers */ struct radix_tree_root radix; enum rdma_driver_id driver_id; + + unsigned int num_write; + unsigned int num_write_ex; + struct uverbs_api_write_method notsupp_method; + const struct uverbs_api_write_method **write_methods; + const struct uverbs_api_write_method **write_ex_methods; }; +/* + * Get an uverbs_api_object that corresponds to the given object_id. + * Note: + * -ENOMSG means that any object is allowed to match during lookup. 
+ */ static inline const struct uverbs_api_object * uapi_get_object(struct uverbs_api *uapi, u16 object_id) { - return radix_tree_lookup(&uapi->radix, uapi_key_obj(object_id)); + const struct uverbs_api_object *res; + + if (object_id == UVERBS_IDR_ANY_OBJECT) + return ERR_PTR(-ENOMSG); + + res = radix_tree_lookup(&uapi->radix, uapi_key_obj(object_id)); + if (!res) + return ERR_PTR(-ENOENT); + + return res; } char *uapi_key_format(char *S, unsigned int key); -struct uverbs_api *uverbs_alloc_api( - const struct uverbs_object_tree_def *const *driver_specs, - enum rdma_driver_id driver_id); +struct uverbs_api *uverbs_alloc_api(struct ib_device *ibdev); void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev); void uverbs_disassociate_api(struct uverbs_api *uapi); void uverbs_destroy_api(struct uverbs_api *uapi); @@ -162,4 +186,37 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm, unsigned int num_attrs); void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile); +extern const struct uapi_definition uverbs_def_obj_counters[]; +extern const struct uapi_definition uverbs_def_obj_cq[]; +extern const struct uapi_definition uverbs_def_obj_device[]; +extern const struct uapi_definition uverbs_def_obj_dm[]; +extern const struct uapi_definition uverbs_def_obj_flow_action[]; +extern const struct uapi_definition uverbs_def_obj_intf[]; +extern const struct uapi_definition uverbs_def_obj_mr[]; +extern const struct uapi_definition uverbs_def_write_intf[]; + +static inline const struct uverbs_api_write_method * +uapi_get_method(const struct uverbs_api *uapi, u32 command) +{ + u32 cmd_idx = command & IB_USER_VERBS_CMD_COMMAND_MASK; + + if (command & ~(u32)(IB_USER_VERBS_CMD_FLAG_EXTENDED | + IB_USER_VERBS_CMD_COMMAND_MASK)) + return ERR_PTR(-EINVAL); + + if (command & IB_USER_VERBS_CMD_FLAG_EXTENDED) { + if (cmd_idx >= uapi->num_write_ex) + return ERR_PTR(-EOPNOTSUPP); + return uapi->write_ex_methods[cmd_idx]; + } + + if (cmd_idx >= uapi->num_write) + return ERR_PTR(-EOPNOTSUPP); + return uapi->write_methods[cmd_idx]; +} + +void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, + struct ib_udata *udata, unsigned int attr_in, + unsigned int attr_out); + #endif /* RDMA_CORE_H */ diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c index 06d8657ce583..46a5c553c624 100644 --- a/drivers/infiniband/core/restrack.c +++ b/drivers/infiniband/core/restrack.c @@ -32,6 +32,7 @@ static const char *type2str(enum rdma_restrack_type type) [RDMA_RESTRACK_QP] = "QP", [RDMA_RESTRACK_CM_ID] = "CM_ID", [RDMA_RESTRACK_MR] = "MR", + [RDMA_RESTRACK_CTX] = "CTX", }; return names[type]; @@ -130,31 +131,14 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res) res)->id.device; case RDMA_RESTRACK_MR: return container_of(res, struct ib_mr, res)->device; + case RDMA_RESTRACK_CTX: + return container_of(res, struct ib_ucontext, res)->device; default: WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type); return NULL; } } -static bool res_is_user(struct rdma_restrack_entry *res) -{ - switch (res->type) { - case RDMA_RESTRACK_PD: - return container_of(res, struct ib_pd, res)->uobject; - case RDMA_RESTRACK_CQ: - return container_of(res, struct ib_cq, res)->uobject; - case RDMA_RESTRACK_QP: - return container_of(res, struct ib_qp, res)->uobject; - case RDMA_RESTRACK_CM_ID: - return !res->kern_name; - case RDMA_RESTRACK_MR: - return container_of(res, struct ib_mr, res)->pd->uobject; - default: - WARN_ONCE(true, "Wrong resource tracking 
type %u\n", res->type); - return false; - } -} - void rdma_restrack_set_task(struct rdma_restrack_entry *res, const char *caller) { @@ -170,17 +154,17 @@ void rdma_restrack_set_task(struct rdma_restrack_entry *res, } EXPORT_SYMBOL(rdma_restrack_set_task); -void rdma_restrack_add(struct rdma_restrack_entry *res) +static void rdma_restrack_add(struct rdma_restrack_entry *res) { struct ib_device *dev = res_to_dev(res); if (!dev) return; - if (res->type != RDMA_RESTRACK_CM_ID || !res_is_user(res)) + if (res->type != RDMA_RESTRACK_CM_ID || rdma_is_kernel_res(res)) res->task = NULL; - if (res_is_user(res)) { + if (!rdma_is_kernel_res(res)) { if (!res->task) rdma_restrack_set_task(res, NULL); res->kern_name = NULL; @@ -196,7 +180,28 @@ void rdma_restrack_add(struct rdma_restrack_entry *res) hash_add(dev->res.hash, &res->node, res->type); up_write(&dev->res.rwsem); } -EXPORT_SYMBOL(rdma_restrack_add); + +/** + * rdma_restrack_kadd() - add kernel object to the reource tracking database + * @res: resource entry + */ +void rdma_restrack_kadd(struct rdma_restrack_entry *res) +{ + res->user = false; + rdma_restrack_add(res); +} +EXPORT_SYMBOL(rdma_restrack_kadd); + +/** + * rdma_restrack_uadd() - add user object to the reource tracking database + * @res: resource entry + */ +void rdma_restrack_uadd(struct rdma_restrack_entry *res) +{ + res->user = true; + rdma_restrack_add(res); +} +EXPORT_SYMBOL(rdma_restrack_uadd); int __must_check rdma_restrack_get(struct rdma_restrack_entry *res) { diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index be5ba5e15496..97e6d7b69abf 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -1147,7 +1147,7 @@ static void free_sm_ah(struct kref *kref) { struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); - rdma_destroy_ah(sm_ah->ah); + rdma_destroy_ah(sm_ah->ah, 0); kfree(sm_ah); } @@ -2276,7 +2276,8 @@ static void update_sm_ah(struct work_struct *work) cpu_to_be64(IB_SA_WELL_KNOWN_GUID)); } - new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr); + new_ah->ah = rdma_create_ah(port->agent->qp->pd, &ah_attr, + RDMA_CREATE_AH_SLEEPABLE); if (IS_ERR(new_ah->ah)) { pr_warn("Couldn't create new SM AH\n"); kfree(new_ah); diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index 1143c0448666..1efadbccf394 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -626,10 +626,10 @@ int ib_security_modify_qp(struct ib_qp *qp, } if (!ret) - ret = real_qp->device->modify_qp(real_qp, - qp_attr, - qp_attr_mask, - udata); + ret = real_qp->device->ops.modify_qp(real_qp, + qp_attr, + qp_attr_mask, + udata); if (new_pps) { /* Clean up the lists and free the appropriate diff --git a/drivers/infiniband/core/smi.h b/drivers/infiniband/core/smi.h index 33c91c8a16e9..91d9b353ab85 100644 --- a/drivers/infiniband/core/smi.h +++ b/drivers/infiniband/core/smi.h @@ -67,7 +67,7 @@ static inline enum smi_action smi_check_local_smp(struct ib_smp *smp, { /* C14-9:3 -- We're at the end of the DR segment of path */ /* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */ - return ((device->process_mad && + return ((device->ops.process_mad && !ib_get_smp_direction(smp) && (smp->hop_ptr == smp->hop_cnt + 1)) ? 
IB_SMI_HANDLE : IB_SMI_DISCARD); @@ -82,7 +82,7 @@ static inline enum smi_action smi_check_local_returning_smp(struct ib_smp *smp, { /* C14-13:3 -- We're at the end of the DR segment of path */ /* C14-13:4 -- Hop Pointer == 0 -> give to SM */ - return ((device->process_mad && + return ((device->ops.process_mad && ib_get_smp_direction(smp) && !smp->hop_ptr) ? IB_SMI_HANDLE : IB_SMI_DISCARD); } diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 6fcce2c206c6..80f68eb0ba5c 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -462,7 +462,7 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr, u16 out_mad_pkey_index = 0; ssize_t ret; - if (!dev->process_mad) + if (!dev->ops.process_mad) return -ENOSYS; in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); @@ -481,11 +481,11 @@ static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr, if (attr != IB_PMA_CLASS_PORT_INFO) in_mad->data[41] = port_num; /* PortSelect field */ - if ((dev->process_mad(dev, IB_MAD_IGNORE_MKEY, - port_num, NULL, NULL, - (const struct ib_mad_hdr *)in_mad, mad_size, - (struct ib_mad_hdr *)out_mad, &mad_size, - &out_mad_pkey_index) & + if ((dev->ops.process_mad(dev, IB_MAD_IGNORE_MKEY, + port_num, NULL, NULL, + (const struct ib_mad_hdr *)in_mad, mad_size, + (struct ib_mad_hdr *)out_mad, &mad_size, + &out_mad_pkey_index) & (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) != (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) { ret = -EINVAL; @@ -786,7 +786,7 @@ static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats, if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan)) return 0; - ret = dev->get_hw_stats(dev, stats, port_num, index); + ret = dev->ops.get_hw_stats(dev, stats, port_num, index); if (ret < 0) return ret; if (ret == stats->num_counters) @@ -946,7 +946,7 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, struct rdma_hw_stats *stats; int i, ret; - stats = device->alloc_hw_stats(device, port_num); + stats = device->ops.alloc_hw_stats(device, port_num); if (!stats) return; @@ -964,8 +964,8 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, if (!hsag) goto err_free_stats; - ret = device->get_hw_stats(device, stats, port_num, - stats->num_counters); + ret = device->ops.get_hw_stats(device, stats, port_num, + stats->num_counters); if (ret != stats->num_counters) goto err_free_hsag; @@ -1057,7 +1057,7 @@ static int add_port(struct ib_device *device, int port_num, goto err_put; } - if (device->process_mad) { + if (device->ops.process_mad) { p->pma_table = get_counter_table(device, port_num); ret = sysfs_create_group(&p->kobj, p->pma_table); if (ret) @@ -1124,7 +1124,7 @@ static int add_port(struct ib_device *device, int port_num, * port, so holder should be device. Therefore skip per port conunter * initialization. 
*/ - if (device->alloc_hw_stats && port_num) + if (device->ops.alloc_hw_stats && port_num) setup_hw_stats(device, p, port_num); list_add_tail(&p->kobj.entry, &device->port_list); @@ -1245,7 +1245,7 @@ static ssize_t node_desc_store(struct device *device, struct ib_device_modify desc = {}; int ret; - if (!dev->modify_device) + if (!dev->ops.modify_device) return -EIO; memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX)); @@ -1341,7 +1341,7 @@ int ib_device_register_sysfs(struct ib_device *device, } } - if (device->alloc_hw_stats) + if (device->ops.alloc_hw_stats) setup_hw_stats(device, NULL, 0); return 0; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 73332b9a25b5..7541fbaf58a3 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1242,7 +1242,7 @@ static void ib_ucm_add_one(struct ib_device *device) dev_t base; struct ib_ucm_device *ucm_dev; - if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1)) + if (!device->ops.alloc_ucontext || !rdma_cap_ib_cm(device, 1)) return; ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index f55f48f6b272..de8d31ab8945 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -88,10 +88,9 @@ enum { struct ib_umad_port { struct cdev cdev; - struct device *dev; - + struct device dev; struct cdev sm_cdev; - struct device *sm_dev; + struct device sm_dev; struct semaphore sm_sem; struct mutex file_mutex; @@ -104,8 +103,8 @@ struct ib_umad_port { }; struct ib_umad_device { - struct kobject kobj; - struct ib_umad_port port[0]; + struct kref kref; + struct ib_umad_port ports[]; }; struct ib_umad_file { @@ -130,8 +129,6 @@ struct ib_umad_packet { struct ib_user_mad mad; }; -static struct class *umad_class; - static const dev_t base_umad_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) + IB_UMAD_NUM_FIXED_MINOR; @@ -143,17 +140,23 @@ static DEFINE_IDA(umad_ida); static void ib_umad_add_one(struct ib_device *device); static void ib_umad_remove_one(struct ib_device *device, void *client_data); -static void ib_umad_release_dev(struct kobject *kobj) +static void ib_umad_dev_free(struct kref *kref) { struct ib_umad_device *dev = - container_of(kobj, struct ib_umad_device, kobj); + container_of(kref, struct ib_umad_device, kref); kfree(dev); } -static struct kobj_type ib_umad_dev_ktype = { - .release = ib_umad_release_dev, -}; +static void ib_umad_dev_get(struct ib_umad_device *dev) +{ + kref_get(&dev->kref); +} + +static void ib_umad_dev_put(struct ib_umad_device *dev) +{ + kref_put(&dev->kref, ib_umad_dev_free); +} static int hdr_size(struct ib_umad_file *file) { @@ -205,7 +208,7 @@ static void send_handler(struct ib_mad_agent *agent, struct ib_umad_packet *packet = send_wc->send_buf->context[0]; dequeue_send(file, packet); - rdma_destroy_ah(packet->msg->ah); + rdma_destroy_ah(packet->msg->ah, RDMA_DESTROY_AH_SLEEPABLE); ib_free_send_mad(packet->msg); if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { @@ -621,7 +624,7 @@ err_send: err_msg: ib_free_send_mad(packet->msg); err_ah: - rdma_destroy_ah(ah); + rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); err_up: mutex_unlock(&file->mutex); err: @@ -657,7 +660,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, mutex_lock(&file->mutex); if (!file->port->ib_dev) { - dev_notice(file->port->dev, + dev_notice(&file->port->dev, 
"ib_umad_reg_agent: invalid device\n"); ret = -EPIPE; goto out; @@ -669,7 +672,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, } if (ureq.qpn != 0 && ureq.qpn != 1) { - dev_notice(file->port->dev, + dev_notice(&file->port->dev, "ib_umad_reg_agent: invalid QPN %d specified\n", ureq.qpn); ret = -EINVAL; @@ -680,7 +683,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg, if (!__get_agent(file, agent_id)) goto found; - dev_notice(file->port->dev, + dev_notice(&file->port->dev, "ib_umad_reg_agent: Max Agents (%u) reached\n", IB_UMAD_MAX_AGENTS); ret = -ENOMEM; @@ -725,10 +728,10 @@ found: if (!file->already_used) { file->already_used = 1; if (!file->use_pkey_index) { - dev_warn(file->port->dev, + dev_warn(&file->port->dev, "process %s did not enable P_Key index support.\n", current->comm); - dev_warn(file->port->dev, + dev_warn(&file->port->dev, " Documentation/infiniband/user_mad.txt has info on the new ABI.\n"); } } @@ -759,7 +762,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) mutex_lock(&file->mutex); if (!file->port->ib_dev) { - dev_notice(file->port->dev, + dev_notice(&file->port->dev, "ib_umad_reg_agent2: invalid device\n"); ret = -EPIPE; goto out; @@ -771,7 +774,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) } if (ureq.qpn != 0 && ureq.qpn != 1) { - dev_notice(file->port->dev, + dev_notice(&file->port->dev, "ib_umad_reg_agent2: invalid QPN %d specified\n", ureq.qpn); ret = -EINVAL; @@ -779,7 +782,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) } if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) { - dev_notice(file->port->dev, + dev_notice(&file->port->dev, "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n", ureq.flags, IB_USER_MAD_REG_FLAGS_CAP); ret = -EINVAL; @@ -796,7 +799,7 @@ static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg) if (!__get_agent(file, agent_id)) goto found; - dev_notice(file->port->dev, + dev_notice(&file->port->dev, "ib_umad_reg_agent2: Max Agents (%u) reached\n", IB_UMAD_MAX_AGENTS); ret = -ENOMEM; @@ -808,7 +811,7 @@ found: req.mgmt_class = ureq.mgmt_class; req.mgmt_class_version = ureq.mgmt_class_version; if (ureq.oui & 0xff000000) { - dev_notice(file->port->dev, + dev_notice(&file->port->dev, "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n", ureq.oui); ret = -EINVAL; @@ -986,8 +989,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp) goto out; } - kobject_get(&port->umad_dev->kobj); - + ib_umad_dev_get(port->umad_dev); out: mutex_unlock(&port->file_mutex); return ret; @@ -1025,8 +1027,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp) mutex_unlock(&file->port->file_mutex); kfree(file); - kobject_put(&dev->kobj); - + ib_umad_dev_put(dev); return 0; } @@ -1076,8 +1077,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp) if (ret) goto err_clr_sm_cap; - kobject_get(&port->umad_dev->kobj); - + ib_umad_dev_get(port->umad_dev); return 0; err_clr_sm_cap: @@ -1106,8 +1106,7 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp) up(&port->sm_sem); - kobject_put(&port->umad_dev->kobj); - + ib_umad_dev_put(port->umad_dev); return ret; } @@ -1124,7 +1123,7 @@ static struct ib_client umad_client = { .remove = ib_umad_remove_one }; -static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, +static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr, char *buf) { 
struct ib_umad_port *port = dev_get_drvdata(dev); @@ -1134,9 +1133,9 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%s\n", dev_name(&port->ib_dev->dev)); } -static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); +static DEVICE_ATTR_RO(ibdev); -static ssize_t show_port(struct device *dev, struct device_attribute *attr, +static ssize_t port_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ib_umad_port *port = dev_get_drvdata(dev); @@ -1146,10 +1145,59 @@ static ssize_t show_port(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%d\n", port->port_num); } -static DEVICE_ATTR(port, S_IRUGO, show_port, NULL); +static DEVICE_ATTR_RO(port); -static CLASS_ATTR_STRING(abi_version, S_IRUGO, - __stringify(IB_USER_MAD_ABI_VERSION)); +static struct attribute *umad_class_dev_attrs[] = { + &dev_attr_ibdev.attr, + &dev_attr_port.attr, + NULL, +}; +ATTRIBUTE_GROUPS(umad_class_dev); + +static char *umad_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); +} + +static ssize_t abi_version_show(struct class *class, + struct class_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION); +} +static CLASS_ATTR_RO(abi_version); + +static struct attribute *umad_class_attrs[] = { + &class_attr_abi_version.attr, + NULL, +}; +ATTRIBUTE_GROUPS(umad_class); + +static struct class umad_class = { + .name = "infiniband_mad", + .devnode = umad_devnode, + .class_groups = umad_class_groups, + .dev_groups = umad_class_dev_groups, +}; + +static void ib_umad_release_port(struct device *device) +{ + struct ib_umad_port *port = dev_get_drvdata(device); + struct ib_umad_device *umad_dev = port->umad_dev; + + ib_umad_dev_put(umad_dev); +} + +static void ib_umad_init_port_dev(struct device *dev, + struct ib_umad_port *port, + const struct ib_device *device) +{ + device_initialize(dev); + ib_umad_dev_get(port->umad_dev); + dev->class = &umad_class; + dev->parent = device->dev.parent; + dev_set_drvdata(dev, port); + dev->release = ib_umad_release_port; +} static int ib_umad_init_port(struct ib_device *device, int port_num, struct ib_umad_device *umad_dev, @@ -1158,6 +1206,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, int devnum; dev_t base_umad; dev_t base_issm; + int ret; devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL); if (devnum < 0) @@ -1172,63 +1221,41 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, } port->ib_dev = device; + port->umad_dev = umad_dev; port->port_num = port_num; sema_init(&port->sm_sem, 1); mutex_init(&port->file_mutex); INIT_LIST_HEAD(&port->file_list); + ib_umad_init_port_dev(&port->dev, port, device); + port->dev.devt = base_umad; + dev_set_name(&port->dev, "umad%d", port->dev_num); cdev_init(&port->cdev, &umad_fops); port->cdev.owner = THIS_MODULE; - cdev_set_parent(&port->cdev, &umad_dev->kobj); - kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num); - if (cdev_add(&port->cdev, base_umad, 1)) - goto err_cdev; - port->dev = device_create(umad_class, device->dev.parent, - port->cdev.dev, port, - "umad%d", port->dev_num); - if (IS_ERR(port->dev)) + ret = cdev_device_add(&port->cdev, &port->dev); + if (ret) goto err_cdev; - if (device_create_file(port->dev, &dev_attr_ibdev)) - goto err_dev; - if (device_create_file(port->dev, &dev_attr_port)) - goto err_dev; - + ib_umad_init_port_dev(&port->sm_dev, port, device); + port->sm_dev.devt = base_issm; + 
dev_set_name(&port->sm_dev, "issm%d", port->dev_num); cdev_init(&port->sm_cdev, &umad_sm_fops); port->sm_cdev.owner = THIS_MODULE; - cdev_set_parent(&port->sm_cdev, &umad_dev->kobj); - kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num); - if (cdev_add(&port->sm_cdev, base_issm, 1)) - goto err_sm_cdev; - - port->sm_dev = device_create(umad_class, device->dev.parent, - port->sm_cdev.dev, port, - "issm%d", port->dev_num); - if (IS_ERR(port->sm_dev)) - goto err_sm_cdev; - - if (device_create_file(port->sm_dev, &dev_attr_ibdev)) - goto err_sm_dev; - if (device_create_file(port->sm_dev, &dev_attr_port)) - goto err_sm_dev; - - return 0; -err_sm_dev: - device_destroy(umad_class, port->sm_cdev.dev); + ret = cdev_device_add(&port->sm_cdev, &port->sm_dev); + if (ret) + goto err_dev; -err_sm_cdev: - cdev_del(&port->sm_cdev); + return 0; err_dev: - device_destroy(umad_class, port->cdev.dev); - + put_device(&port->sm_dev); + cdev_device_del(&port->cdev, &port->dev); err_cdev: - cdev_del(&port->cdev); + put_device(&port->dev); ida_free(&umad_ida, devnum); - - return -1; + return ret; } static void ib_umad_kill_port(struct ib_umad_port *port) @@ -1236,17 +1263,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port) struct ib_umad_file *file; int id; - dev_set_drvdata(port->dev, NULL); - dev_set_drvdata(port->sm_dev, NULL); - - device_destroy(umad_class, port->cdev.dev); - device_destroy(umad_class, port->sm_cdev.dev); - - cdev_del(&port->cdev); - cdev_del(&port->sm_cdev); - mutex_lock(&port->file_mutex); + /* Mark ib_dev NULL and block ioctl or other file ops to progress + * further. + */ port->ib_dev = NULL; list_for_each_entry(file, &port->file_list, port_list) { @@ -1260,6 +1281,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port) } mutex_unlock(&port->file_mutex); + + cdev_device_del(&port->sm_cdev, &port->sm_dev); + put_device(&port->sm_dev); + cdev_device_del(&port->cdev, &port->dev); + put_device(&port->dev); ida_free(&umad_ida, port->dev_num); } @@ -1272,22 +1298,17 @@ static void ib_umad_add_one(struct ib_device *device) s = rdma_start_port(device); e = rdma_end_port(device); - umad_dev = kzalloc(sizeof *umad_dev + - (e - s + 1) * sizeof (struct ib_umad_port), - GFP_KERNEL); + umad_dev = kzalloc(struct_size(umad_dev, ports, e - s + 1), GFP_KERNEL); if (!umad_dev) return; - kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype); - + kref_init(&umad_dev->kref); for (i = s; i <= e; ++i) { if (!rdma_cap_ib_mad(device, i)) continue; - umad_dev->port[i - s].umad_dev = umad_dev; - if (ib_umad_init_port(device, i, umad_dev, - &umad_dev->port[i - s])) + &umad_dev->ports[i - s])) goto err; count++; @@ -1305,10 +1326,10 @@ err: if (!rdma_cap_ib_mad(device, i)) continue; - ib_umad_kill_port(&umad_dev->port[i - s]); + ib_umad_kill_port(&umad_dev->ports[i - s]); } free: - kobject_put(&umad_dev->kobj); + ib_umad_dev_put(umad_dev); } static void ib_umad_remove_one(struct ib_device *device, void *client_data) @@ -1321,15 +1342,9 @@ static void ib_umad_remove_one(struct ib_device *device, void *client_data) for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) { if (rdma_cap_ib_mad(device, i + rdma_start_port(device))) - ib_umad_kill_port(&umad_dev->port[i]); + ib_umad_kill_port(&umad_dev->ports[i]); } - - kobject_put(&umad_dev->kobj); -} - -static char *umad_devnode(struct device *dev, umode_t *mode) -{ - return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); + ib_umad_dev_put(umad_dev); } static int __init ib_umad_init(void) @@ -1338,7 +1353,7 @@ static int __init 
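
The umad refactor above is the stock cdev_device_add() lifecycle: each per-port struct device is set up with device_initialize(), its ->release callback drops the reference the port holds on its ib_umad_device (ib_umad_dev_get()/ib_umad_dev_put()), and teardown is cdev_device_del() followed by put_device(). A minimal sketch of that generic pattern, using hypothetical foo_* names instead of the umad types (umad's release drops a kref on the parent device rather than freeing the port directly):

	#include <linux/cdev.h>
	#include <linux/device.h>
	#include <linux/fs.h>
	#include <linux/slab.h>

	struct foo_port {
		struct device dev;
		struct cdev cdev;
	};

	static const struct file_operations foo_fops = {
		.owner = THIS_MODULE,
	};

	static void foo_release(struct device *dev)
	{
		/* runs once the last reference is gone; safe to free here */
		kfree(container_of(dev, struct foo_port, dev));
	}

	static int foo_add_port(struct class *class, dev_t devt, int id)
	{
		struct foo_port *port;
		int ret;

		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return -ENOMEM;

		device_initialize(&port->dev);		/* refcount starts at one */
		port->dev.class = class;
		port->dev.devt = devt;
		port->dev.release = foo_release;
		dev_set_name(&port->dev, "foo%d", id);

		cdev_init(&port->cdev, &foo_fops);
		port->cdev.owner = THIS_MODULE;

		/* adds the cdev with the device as its parent kobject, then the device */
		ret = cdev_device_add(&port->cdev, &port->dev);
		if (ret)
			put_device(&port->dev);		/* frees via foo_release() */
		return ret;
	}

	static void foo_del_port(struct foo_port *port)
	{
		cdev_device_del(&port->cdev, &port->dev);
		put_device(&port->dev);
	}
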
ib_umad_init(void) ret = register_chrdev_region(base_umad_dev, IB_UMAD_NUM_FIXED_MINOR * 2, - "infiniband_mad"); + umad_class.name); if (ret) { pr_err("couldn't register device number\n"); goto out; @@ -1346,28 +1361,19 @@ static int __init ib_umad_init(void) ret = alloc_chrdev_region(&dynamic_umad_dev, 0, IB_UMAD_NUM_DYNAMIC_MINOR * 2, - "infiniband_mad"); + umad_class.name); if (ret) { pr_err("couldn't register dynamic device number\n"); goto out_alloc; } dynamic_issm_dev = dynamic_umad_dev + IB_UMAD_NUM_DYNAMIC_MINOR; - umad_class = class_create(THIS_MODULE, "infiniband_mad"); - if (IS_ERR(umad_class)) { - ret = PTR_ERR(umad_class); + ret = class_register(&umad_class); + if (ret) { pr_err("couldn't create class infiniband_mad\n"); goto out_chrdev; } - umad_class->devnode = umad_devnode; - - ret = class_create_file(umad_class, &class_attr_abi_version.attr); - if (ret) { - pr_err("couldn't create abi_version attribute\n"); - goto out_class; - } - ret = ib_register_client(&umad_client); if (ret) { pr_err("couldn't register ib_umad client\n"); @@ -1377,7 +1383,7 @@ static int __init ib_umad_init(void) return 0; out_class: - class_destroy(umad_class); + class_unregister(&umad_class); out_chrdev: unregister_chrdev_region(dynamic_umad_dev, @@ -1394,7 +1400,7 @@ out: static void __exit ib_umad_cleanup(void) { ib_unregister_client(&umad_client); - class_destroy(umad_class); + class_unregister(&umad_class); unregister_chrdev_region(base_umad_dev, IB_UMAD_NUM_FIXED_MINOR * 2); unregister_chrdev_region(dynamic_umad_dev, diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index c97935a0c7c6..ea0bc6885517 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -161,9 +161,6 @@ struct ib_uverbs_file { struct mutex umap_lock; struct list_head umaps; - u64 uverbs_cmd_mask; - u64 uverbs_ex_cmd_mask; - struct idr idr; /* spinlock protects write access to idr */ spinlock_t idr_lock; @@ -249,7 +246,6 @@ int uverbs_dealloc_mw(struct ib_mw *mw); void ib_uverbs_detach_umcast(struct ib_qp *qp, struct ib_uqp_object *uobj); -void create_udata(struct uverbs_attr_bundle *ctx, struct ib_udata *udata); long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); struct ib_uverbs_flow_spec { @@ -297,63 +293,29 @@ extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION); extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_DM); extern const struct uverbs_object_def UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS); -#define IB_UVERBS_DECLARE_CMD(name) \ - ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ - const char __user *buf, int in_len, \ - int out_len) - -IB_UVERBS_DECLARE_CMD(get_context); -IB_UVERBS_DECLARE_CMD(query_device); -IB_UVERBS_DECLARE_CMD(query_port); -IB_UVERBS_DECLARE_CMD(alloc_pd); -IB_UVERBS_DECLARE_CMD(dealloc_pd); -IB_UVERBS_DECLARE_CMD(reg_mr); -IB_UVERBS_DECLARE_CMD(rereg_mr); -IB_UVERBS_DECLARE_CMD(dereg_mr); -IB_UVERBS_DECLARE_CMD(alloc_mw); -IB_UVERBS_DECLARE_CMD(dealloc_mw); -IB_UVERBS_DECLARE_CMD(create_comp_channel); -IB_UVERBS_DECLARE_CMD(create_cq); -IB_UVERBS_DECLARE_CMD(resize_cq); -IB_UVERBS_DECLARE_CMD(poll_cq); -IB_UVERBS_DECLARE_CMD(req_notify_cq); -IB_UVERBS_DECLARE_CMD(destroy_cq); -IB_UVERBS_DECLARE_CMD(create_qp); -IB_UVERBS_DECLARE_CMD(open_qp); -IB_UVERBS_DECLARE_CMD(query_qp); -IB_UVERBS_DECLARE_CMD(modify_qp); -IB_UVERBS_DECLARE_CMD(destroy_qp); -IB_UVERBS_DECLARE_CMD(post_send); -IB_UVERBS_DECLARE_CMD(post_recv); -IB_UVERBS_DECLARE_CMD(post_srq_recv); 
-IB_UVERBS_DECLARE_CMD(create_ah); -IB_UVERBS_DECLARE_CMD(destroy_ah); -IB_UVERBS_DECLARE_CMD(attach_mcast); -IB_UVERBS_DECLARE_CMD(detach_mcast); -IB_UVERBS_DECLARE_CMD(create_srq); -IB_UVERBS_DECLARE_CMD(modify_srq); -IB_UVERBS_DECLARE_CMD(query_srq); -IB_UVERBS_DECLARE_CMD(destroy_srq); -IB_UVERBS_DECLARE_CMD(create_xsrq); -IB_UVERBS_DECLARE_CMD(open_xrcd); -IB_UVERBS_DECLARE_CMD(close_xrcd); - -#define IB_UVERBS_DECLARE_EX_CMD(name) \ - int ib_uverbs_ex_##name(struct ib_uverbs_file *file, \ - struct ib_udata *ucore, \ - struct ib_udata *uhw) - -IB_UVERBS_DECLARE_EX_CMD(create_flow); -IB_UVERBS_DECLARE_EX_CMD(destroy_flow); -IB_UVERBS_DECLARE_EX_CMD(query_device); -IB_UVERBS_DECLARE_EX_CMD(create_cq); -IB_UVERBS_DECLARE_EX_CMD(create_qp); -IB_UVERBS_DECLARE_EX_CMD(create_wq); -IB_UVERBS_DECLARE_EX_CMD(modify_wq); -IB_UVERBS_DECLARE_EX_CMD(destroy_wq); -IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table); -IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table); -IB_UVERBS_DECLARE_EX_CMD(modify_qp); -IB_UVERBS_DECLARE_EX_CMD(modify_cq); +/* + * ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the + * PortInfo CapabilityMask, but was extended with unique bits. + */ +static inline u32 make_port_cap_flags(const struct ib_port_attr *attr) +{ + u32 res; + + /* All IBA CapabilityMask bits are passed through here, except bit 26, + * which is overridden with IP_BASED_GIDS. This is due to a historical + * mistake in the implementation of IP_BASED_GIDS. Otherwise all other + * bits match the IBA definition across all kernel versions. + */ + res = attr->port_cap_flags & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS; + + if (attr->ip_gids) + res |= IB_UVERBS_PCF_IP_BASED_GIDS; + + return res; +} + +void copy_port_attr_to_resp(struct ib_port_attr *attr, + struct ib_uverbs_query_port_resp *resp, + struct ib_device *ib_dev, u8 port_num); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index a93853770e3c..6b12cc5f97b2 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -47,11 +47,134 @@ #include "uverbs.h" #include "core_priv.h" +/* + * Copy a response to userspace. If the provided 'resp' is larger than the + * user buffer it is silently truncated. If the user provided a larger buffer + * then the trailing portion is zero filled. + * + * These semantics are intended to support future extension of the output + * structures. + */ +static int uverbs_response(struct uverbs_attr_bundle *attrs, const void *resp, + size_t resp_len) +{ + int ret; + + if (copy_to_user(attrs->ucore.outbuf, resp, + min(attrs->ucore.outlen, resp_len))) + return -EFAULT; + + if (resp_len < attrs->ucore.outlen) { + /* + * Zero fill any extra memory that user + * space might have provided. + */ + ret = clear_user(attrs->ucore.outbuf + resp_len, + attrs->ucore.outlen - resp_len); + if (ret) + return -EFAULT; + } + + return 0; +} + +/* + * Copy a request from userspace. If the provided 'req' is larger than the + * user buffer then the user buffer is zero extended into the 'req'. If 'req' + * is smaller than the user buffer then the uncopied bytes in the user buffer + * must be zero. 
+ */ +static int uverbs_request(struct uverbs_attr_bundle *attrs, void *req, + size_t req_len) +{ + if (copy_from_user(req, attrs->ucore.inbuf, + min(attrs->ucore.inlen, req_len))) + return -EFAULT; + + if (attrs->ucore.inlen < req_len) { + memset(req + attrs->ucore.inlen, 0, + req_len - attrs->ucore.inlen); + } else if (attrs->ucore.inlen > req_len) { + if (!ib_is_buffer_cleared(attrs->ucore.inbuf + req_len, + attrs->ucore.inlen - req_len)) + return -EOPNOTSUPP; + } + return 0; +} + +/* + * Generate the value for the 'response_length' protocol used by write_ex. + * This is the number of bytes the kernel actually wrote. Userspace can use + * this to detect what structure members in the response the kernel + * understood. + */ +static u32 uverbs_response_length(struct uverbs_attr_bundle *attrs, + size_t resp_len) +{ + return min_t(size_t, attrs->ucore.outlen, resp_len); +} + +/* + * The iterator version of the request interface is for handlers that need to + * step over a flex array at the end of a command header. + */ +struct uverbs_req_iter { + const void __user *cur; + const void __user *end; +}; + +static int uverbs_request_start(struct uverbs_attr_bundle *attrs, + struct uverbs_req_iter *iter, + void *req, + size_t req_len) +{ + if (attrs->ucore.inlen < req_len) + return -ENOSPC; + + if (copy_from_user(req, attrs->ucore.inbuf, req_len)) + return -EFAULT; + + iter->cur = attrs->ucore.inbuf + req_len; + iter->end = attrs->ucore.inbuf + attrs->ucore.inlen; + return 0; +} + +static int uverbs_request_next(struct uverbs_req_iter *iter, void *val, + size_t len) +{ + if (iter->cur + len > iter->end) + return -ENOSPC; + + if (copy_from_user(val, iter->cur, len)) + return -EFAULT; + + iter->cur += len; + return 0; +} + +static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter, + size_t len) +{ + const void __user *res = iter->cur; + + if (iter->cur + len > iter->end) + return ERR_PTR(-ENOSPC); + iter->cur += len; + return res; +} + +static int uverbs_request_finish(struct uverbs_req_iter *iter) +{ + if (!ib_is_buffer_cleared(iter->cur, iter->end - iter->cur)) + return -EOPNOTSUPP; + return 0; +} + static struct ib_uverbs_completion_event_file * -_ib_uverbs_lookup_comp_file(s32 fd, struct ib_uverbs_file *ufile) +_ib_uverbs_lookup_comp_file(s32 fd, const struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = ufd_get_read(UVERBS_OBJECT_COMP_CHANNEL, - fd, ufile); + fd, attrs); if (IS_ERR(uobj)) return (void *)uobj; @@ -65,24 +188,20 @@ _ib_uverbs_lookup_comp_file(s32 fd, struct ib_uverbs_file *ufile) #define ib_uverbs_lookup_comp_file(_fd, _ufile) \ _ib_uverbs_lookup_comp_file((_fd)*typecheck(s32, _fd), _ufile) -ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, - const char __user *buf, - int in_len, int out_len) +static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs) { + struct ib_uverbs_file *file = attrs->ufile; struct ib_uverbs_get_context cmd; struct ib_uverbs_get_context_resp resp; - struct ib_udata udata; struct ib_ucontext *ucontext; struct file *filp; struct ib_rdmacg_object cg_obj; struct ib_device *ib_dev; int ret; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; mutex_lock(&file->ucontext_lock); ib_dev = srcu_dereference(file->device->ib_dev, @@ -97,16 +216,11 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, goto err; } - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - 
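
With the helpers above, a converted handler no longer computes buffer offsets by hand: uverbs_request() zero-extends a short user command and rejects non-zero trailing bytes, uverbs_response() truncates or zero-fills the reply to fit the user buffer, and the handler returns 0 or a negative errno rather than in_len. A minimal sketch of the resulting shape, using a hypothetical 'frob' verb and ABI structs rather than a real command:

	static int ib_uverbs_frob(struct uverbs_attr_bundle *attrs)
	{
		struct ib_uverbs_frob cmd;		/* hypothetical ABI struct */
		struct ib_uverbs_frob_resp resp = {};	/* hypothetical ABI struct */
		int ret;

		/* a short buffer from old userspace reads as zero in the tail */
		ret = uverbs_request(attrs, &cmd, sizeof(cmd));
		if (ret)
			return ret;

		if (cmd.reserved)
			return -EINVAL;

		resp.value = do_frob(attrs->ufile, cmd.handle);	/* hypothetical */

		/* a short user response buffer silently truncates the copy */
		return uverbs_response(attrs, &resp, sizeof(resp));
	}
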
u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); - ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); if (ret) goto err; - ucontext = ib_dev->alloc_ucontext(ib_dev, &udata); + ucontext = ib_dev->ops.alloc_ucontext(ib_dev, &attrs->driver_udata); if (IS_ERR(ucontext)) { ret = PTR_ERR(ucontext); goto err_alloc; @@ -141,13 +255,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, goto err_fd; } - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) goto err_file; - } fd_install(resp.async_fd, filp); + ucontext->res.type = RDMA_RESTRACK_CTX; + rdma_restrack_uadd(&ucontext->res); + /* * Make sure that ib_uverbs_get_ucontext() sees the pointer update * only after all writes to setup the ucontext have completed @@ -156,7 +272,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, mutex_unlock(&file->ucontext_lock); - return in_len; + return 0; err_file: ib_uverbs_free_async_event_file(file); @@ -166,7 +282,7 @@ err_fd: put_unused_fd(resp.async_fd); err_free: - ib_dev->dealloc_ucontext(ucontext); + ib_dev->ops.dealloc_ucontext(ucontext); err_alloc: ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE); @@ -224,57 +340,28 @@ static void copy_query_dev_fields(struct ib_ucontext *ucontext, resp->phys_port_cnt = ib_dev->phys_port_cnt; } -ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, - const char __user *buf, - int in_len, int out_len) +static int ib_uverbs_query_device(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_query_device cmd; struct ib_uverbs_query_device_resp resp; struct ib_ucontext *ucontext; + int ret; - ucontext = ib_uverbs_get_ucontext(file); + ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; memset(&resp, 0, sizeof resp); copy_query_dev_fields(ucontext, &resp, &ucontext->device->attrs); - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - return -EFAULT; - - return in_len; -} - -/* - * ib_uverbs_query_port_resp.port_cap_flags started out as just a copy of the - * PortInfo CapabilityMask, but was extended with unique bits. - */ -static u32 make_port_cap_flags(const struct ib_port_attr *attr) -{ - u32 res; - - /* All IBA CapabilityMask bits are passed through here, except bit 26, - * which is overridden with IP_BASED_GIDS. This is due to a historical - * mistake in the implementation of IP_BASED_GIDS. Otherwise all other - * bits match the IBA definition across all kernel versions. 
- */ - res = attr->port_cap_flags & ~(u32)IB_UVERBS_PCF_IP_BASED_GIDS; - - if (attr->ip_gids) - res |= IB_UVERBS_PCF_IP_BASED_GIDS; - - return res; + return uverbs_response(attrs, &resp, sizeof(resp)); } -ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, - const char __user *buf, - int in_len, int out_len) +static int ib_uverbs_query_port(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_query_port cmd; struct ib_uverbs_query_port_resp resp; @@ -283,88 +370,43 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, struct ib_ucontext *ucontext; struct ib_device *ib_dev; - ucontext = ib_uverbs_get_ucontext(file); + ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ib_dev = ucontext->device; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; ret = ib_query_port(ib_dev, cmd.port_num, &attr); if (ret) return ret; memset(&resp, 0, sizeof resp); + copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num); - resp.state = attr.state; - resp.max_mtu = attr.max_mtu; - resp.active_mtu = attr.active_mtu; - resp.gid_tbl_len = attr.gid_tbl_len; - resp.port_cap_flags = make_port_cap_flags(&attr); - resp.max_msg_sz = attr.max_msg_sz; - resp.bad_pkey_cntr = attr.bad_pkey_cntr; - resp.qkey_viol_cntr = attr.qkey_viol_cntr; - resp.pkey_tbl_len = attr.pkey_tbl_len; - - if (rdma_is_grh_required(ib_dev, cmd.port_num)) - resp.flags |= IB_UVERBS_QPF_GRH_REQUIRED; - - if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) { - resp.lid = OPA_TO_IB_UCAST_LID(attr.lid); - resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid); - } else { - resp.lid = ib_lid_cpu16(attr.lid); - resp.sm_lid = ib_lid_cpu16(attr.sm_lid); - } - resp.lmc = attr.lmc; - resp.max_vl_num = attr.max_vl_num; - resp.sm_sl = attr.sm_sl; - resp.subnet_timeout = attr.subnet_timeout; - resp.init_type_reply = attr.init_type_reply; - resp.active_width = attr.active_width; - resp.active_speed = attr.active_speed; - resp.phys_state = attr.phys_state; - resp.link_layer = rdma_port_get_link_layer(ib_dev, - cmd.port_num); - - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - return -EFAULT; - - return in_len; + return uverbs_response(attrs, &resp, sizeof(resp)); } -ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, - const char __user *buf, - int in_len, int out_len) +static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_alloc_pd cmd; struct ib_uverbs_alloc_pd_resp resp; - struct ib_udata udata; struct ib_uobject *uobj; struct ib_pd *pd; int ret; struct ib_device *ib_dev; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - uobj = uobj_alloc(UVERBS_OBJECT_PD, file, &ib_dev); + uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); - pd = ib_dev->alloc_pd(ib_dev, uobj->context, &udata); + pd = ib_dev->ops.alloc_pd(ib_dev, uobj->context, &attrs->driver_udata); if (IS_ERR(pd)) { ret = PTR_ERR(pd); goto err; @@ -379,14 +421,13 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, memset(&resp, 0, sizeof resp); resp.pd_handle = uobj->id; pd->res.type = RDMA_RESTRACK_PD; - 
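
The override in make_port_cap_flags() (now a static inline in uverbs.h, see above) touches exactly one bit: IB_UVERBS_PCF_IP_BASED_GIDS, bit 26, is recomputed from attr->ip_gids while every other CapabilityMask bit passes through unchanged. A rough illustration, where 0x00010000 stands in for any other capability bit a device might report:

	struct ib_port_attr attr = {
		/* device-reported CapabilityMask with bit 26 set ... */
		.port_cap_flags = IB_UVERBS_PCF_IP_BASED_GIDS | 0x00010000,
		/* ... but the GID table holds no IP-based (RoCE) GIDs */
		.ip_gids = 0,
	};

	/* bit 26 is cleared, the other bit survives */
	WARN_ON(make_port_cap_flags(&attr) != 0x00010000);

	/* with IP-based GIDs present, bit 26 is reported even if the
	 * device never set it in its own CapabilityMask */
	attr.port_cap_flags = 0x00010000;
	attr.ip_gids = 1;
	WARN_ON(make_port_cap_flags(&attr) !=
		(0x00010000 | IB_UVERBS_PCF_IP_BASED_GIDS));
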
rdma_restrack_add(&pd->res); + rdma_restrack_uadd(&pd->res); - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) goto err_copy; - } - return uobj_alloc_commit(uobj, in_len); + return uobj_alloc_commit(uobj); err_copy: ib_dealloc_pd(pd); @@ -396,17 +437,16 @@ err: return ret; } -ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, - const char __user *buf, - int in_len, int out_len) +static int ib_uverbs_dealloc_pd(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_dealloc_pd cmd; + int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, file, - in_len); + return uobj_perform_destroy(UVERBS_OBJECT_PD, cmd.pd_handle, attrs); } struct xrcd_table_entry { @@ -494,13 +534,11 @@ static void xrcd_table_delete(struct ib_uverbs_device *dev, } } -ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_open_xrcd(struct uverbs_attr_bundle *attrs) { + struct ib_uverbs_device *ibudev = attrs->ufile->device; struct ib_uverbs_open_xrcd cmd; struct ib_uverbs_open_xrcd_resp resp; - struct ib_udata udata; struct ib_uxrcd_object *obj; struct ib_xrcd *xrcd = NULL; struct fd f = {NULL, 0}; @@ -509,18 +547,11 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, int new_xrcd = 0; struct ib_device *ib_dev; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - mutex_lock(&file->device->xrcd_tree_mutex); + mutex_lock(&ibudev->xrcd_tree_mutex); if (cmd.fd != -1) { /* search for file descriptor */ @@ -531,7 +562,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, } inode = file_inode(f.file); - xrcd = find_xrcd(file->device, inode); + xrcd = find_xrcd(ibudev, inode); if (!xrcd && !(cmd.oflags & O_CREAT)) { /* no file descriptor. 
Need CREATE flag */ ret = -EAGAIN; @@ -544,7 +575,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, } } - obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, file, + obj = (struct ib_uxrcd_object *)uobj_alloc(UVERBS_OBJECT_XRCD, attrs, &ib_dev); if (IS_ERR(obj)) { ret = PTR_ERR(obj); @@ -552,7 +583,8 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, } if (!xrcd) { - xrcd = ib_dev->alloc_xrcd(ib_dev, obj->uobject.context, &udata); + xrcd = ib_dev->ops.alloc_xrcd(ib_dev, obj->uobject.context, + &attrs->driver_udata); if (IS_ERR(xrcd)) { ret = PTR_ERR(xrcd); goto err; @@ -574,29 +606,28 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, if (inode) { if (new_xrcd) { /* create new inode/xrcd table entry */ - ret = xrcd_table_insert(file->device, inode, xrcd); + ret = xrcd_table_insert(ibudev, inode, xrcd); if (ret) goto err_dealloc_xrcd; } atomic_inc(&xrcd->usecnt); } - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) goto err_copy; - } if (f.file) fdput(f); - mutex_unlock(&file->device->xrcd_tree_mutex); + mutex_unlock(&ibudev->xrcd_tree_mutex); - return uobj_alloc_commit(&obj->uobject, in_len); + return uobj_alloc_commit(&obj->uobject); err_copy: if (inode) { if (new_xrcd) - xrcd_table_delete(file->device, inode); + xrcd_table_delete(ibudev, inode); atomic_dec(&xrcd->usecnt); } @@ -610,22 +641,21 @@ err_tree_mutex_unlock: if (f.file) fdput(f); - mutex_unlock(&file->device->xrcd_tree_mutex); + mutex_unlock(&ibudev->xrcd_tree_mutex); return ret; } -ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_close_xrcd(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_close_xrcd cmd; + int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, file, - in_len); + return uobj_perform_destroy(UVERBS_OBJECT_XRCD, cmd.xrcd_handle, attrs); } int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, @@ -653,29 +683,19 @@ int ib_uverbs_dealloc_xrcd(struct ib_uobject *uobject, return ret; } -ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_reg_mr cmd; struct ib_uverbs_reg_mr_resp resp; - struct ib_udata udata; struct ib_uobject *uobj; struct ib_pd *pd; struct ib_mr *mr; int ret; struct ib_device *ib_dev; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)) return -EINVAL; @@ -684,11 +704,11 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, if (ret) return ret; - uobj = uobj_alloc(UVERBS_OBJECT_MR, file, &ib_dev); + uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); - pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file); + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!pd) { ret = -EINVAL; goto err_free; @@ -703,8 +723,9 @@ ssize_t 
ib_uverbs_reg_mr(struct ib_uverbs_file *file, } } - mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, - cmd.access_flags, &udata); + mr = pd->device->ops.reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va, + cmd.access_flags, + &attrs->driver_udata); if (IS_ERR(mr)) { ret = PTR_ERR(mr); goto err_put; @@ -716,7 +737,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, mr->uobject = uobj; atomic_inc(&pd->usecnt); mr->res.type = RDMA_RESTRACK_MR; - rdma_restrack_add(&mr->res); + rdma_restrack_uadd(&mr->res); uobj->object = mr; @@ -725,14 +746,13 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, resp.rkey = mr->rkey; resp.mr_handle = uobj->id; - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) goto err_copy; - } uobj_put_obj_read(pd); - return uobj_alloc_commit(uobj, in_len); + return uobj_alloc_commit(uobj); err_copy: ib_dereg_mr(mr); @@ -745,29 +765,19 @@ err_free: return ret; } -ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_rereg_mr cmd; struct ib_uverbs_rereg_mr_resp resp; - struct ib_udata udata; struct ib_pd *pd = NULL; struct ib_mr *mr; struct ib_pd *old_pd; int ret; struct ib_uobject *uobj; - if (out_len < sizeof(resp)) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags) return -EINVAL; @@ -777,7 +787,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))) return -EINVAL; - uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, file); + uobj = uobj_get_write(UVERBS_OBJECT_MR, cmd.mr_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); @@ -796,7 +806,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, if (cmd.flags & IB_MR_REREG_PD) { pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, - file); + attrs); if (!pd) { ret = -EINVAL; goto put_uobjs; @@ -804,9 +814,10 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, } old_pd = mr->pd; - ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start, - cmd.length, cmd.hca_va, - cmd.access_flags, pd, &udata); + ret = mr->device->ops.rereg_user_mr(mr, cmd.flags, cmd.start, + cmd.length, cmd.hca_va, + cmd.access_flags, pd, + &attrs->driver_udata); if (!ret) { if (cmd.flags & IB_MR_REREG_PD) { atomic_inc(&pd->usecnt); @@ -821,10 +832,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file, resp.lkey = mr->lkey; resp.rkey = mr->rkey; - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) - ret = -EFAULT; - else - ret = in_len; + ret = uverbs_response(attrs, &resp, sizeof(resp)); put_uobj_pd: if (cmd.flags & IB_MR_REREG_PD) @@ -836,54 +844,43 @@ put_uobjs: return ret; } -ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_dereg_mr(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_dereg_mr cmd; + int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - return 
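
Calls such as pd->device->ops.reg_user_mr() above go through a per-device ops table rather than individual function pointers on struct ib_device. A hedged sketch of how a driver might populate that table, assuming an ib_set_device_ops()-style registration helper (not shown in this file); the mydrv_* names are placeholders:

	static const struct ib_device_ops mydrv_dev_ops = {
		/* field names mirror the ops.* calls made by uverbs above */
		.alloc_ucontext		= mydrv_alloc_ucontext,
		.dealloc_ucontext	= mydrv_dealloc_ucontext,
		.alloc_pd		= mydrv_alloc_pd,
		.reg_user_mr		= mydrv_reg_user_mr,
		.rereg_user_mr		= mydrv_rereg_user_mr,
		.alloc_mw		= mydrv_alloc_mw,
		.create_cq		= mydrv_create_cq,
		.post_send		= mydrv_post_send,
	};

	static void mydrv_init_ibdev(struct ib_device *ibdev)
	{
		/* intended to copy the whole table into ibdev->ops in one call,
		 * replacing per-pointer assignments */
		ib_set_device_ops(ibdev, &mydrv_dev_ops);
	}
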
uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, file, - in_len); + return uobj_perform_destroy(UVERBS_OBJECT_MR, cmd.mr_handle, attrs); } -ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_alloc_mw(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_alloc_mw cmd; struct ib_uverbs_alloc_mw_resp resp; struct ib_uobject *uobj; struct ib_pd *pd; struct ib_mw *mw; - struct ib_udata udata; int ret; struct ib_device *ib_dev; - if (out_len < sizeof(resp)) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - uobj = uobj_alloc(UVERBS_OBJECT_MW, file, &ib_dev); + uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); - pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file); + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!pd) { ret = -EINVAL; goto err_free; } - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); - - mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata); + mw = pd->device->ops.alloc_mw(pd, cmd.mw_type, &attrs->driver_udata); if (IS_ERR(mw)) { ret = PTR_ERR(mw); goto err_put; @@ -900,13 +897,12 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file, resp.rkey = mw->rkey; resp.mw_handle = uobj->id; - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) { - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) goto err_copy; - } uobj_put_obj_read(pd); - return uobj_alloc_commit(uobj, in_len); + return uobj_alloc_commit(uobj); err_copy: uverbs_dealloc_mw(mw); @@ -917,36 +913,32 @@ err_free: return ret; } -ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_dealloc_mw(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_dealloc_mw cmd; + int ret; - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, file, - in_len); + return uobj_perform_destroy(UVERBS_OBJECT_MW, cmd.mw_handle, attrs); } -ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_create_comp_channel(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_comp_channel cmd; struct ib_uverbs_create_comp_channel_resp resp; struct ib_uobject *uobj; struct ib_uverbs_completion_event_file *ev_file; struct ib_device *ib_dev; + int ret; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, file, &ib_dev); + uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); @@ -956,25 +948,17 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, uobj); ib_uverbs_init_event_queue(&ev_file->ev_queue); - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) { uobj_alloc_abort(uobj); - return -EFAULT; + return ret; } - return uobj_alloc_commit(uobj, in_len); + return 
uobj_alloc_commit(uobj); } -static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw, - struct ib_uverbs_ex_create_cq *cmd, - size_t cmd_sz, - int (*cb)(struct ib_uverbs_file *file, - struct ib_ucq_object *obj, - struct ib_uverbs_ex_create_cq_resp *resp, - struct ib_udata *udata, - void *context), - void *context) +static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs, + struct ib_uverbs_ex_create_cq *cmd) { struct ib_ucq_object *obj; struct ib_uverbs_completion_event_file *ev_file = NULL; @@ -984,21 +968,16 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, struct ib_cq_init_attr attr = {}; struct ib_device *ib_dev; - if (cmd->comp_vector >= file->device->num_comp_vectors) + if (cmd->comp_vector >= attrs->ufile->device->num_comp_vectors) return ERR_PTR(-EINVAL); - obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, file, + obj = (struct ib_ucq_object *)uobj_alloc(UVERBS_OBJECT_CQ, attrs, &ib_dev); if (IS_ERR(obj)) return obj; - if (!ib_dev->create_cq) { - ret = -EOPNOTSUPP; - goto err; - } - if (cmd->comp_channel >= 0) { - ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, file); + ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel, attrs); if (IS_ERR(ev_file)) { ret = PTR_ERR(ev_file); goto err; @@ -1013,11 +992,10 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, attr.cqe = cmd->cqe; attr.comp_vector = cmd->comp_vector; + attr.flags = cmd->flags; - if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags)) - attr.flags = cmd->flags; - - cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context, uhw); + cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context, + &attrs->driver_udata); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto err_file; @@ -1034,18 +1012,16 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file, memset(&resp, 0, sizeof resp); resp.base.cq_handle = obj->uobject.id; resp.base.cqe = cq->cqe; - - resp.response_length = offsetof(typeof(resp), response_length) + - sizeof(resp.response_length); + resp.response_length = uverbs_response_length(attrs, sizeof(resp)); cq->res.type = RDMA_RESTRACK_CQ; - rdma_restrack_add(&cq->res); + rdma_restrack_uadd(&cq->res); - ret = cb(file, obj, &resp, ucore, context); + ret = uverbs_response(attrs, &resp, sizeof(resp)); if (ret) goto err_cb; - ret = uobj_alloc_commit(&obj->uobject, 0); + ret = uobj_alloc_commit(&obj->uobject); if (ret) return ERR_PTR(ret); return obj; @@ -1055,7 +1031,7 @@ err_cb: err_file: if (ev_file) - ib_uverbs_release_ucq(file, ev_file, obj); + ib_uverbs_release_ucq(attrs->ufile, ev_file, obj); err: uobj_alloc_abort(&obj->uobject); @@ -1063,41 +1039,16 @@ err: return ERR_PTR(ret); } -static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file, - struct ib_ucq_object *obj, - struct ib_uverbs_ex_create_cq_resp *resp, - struct ib_udata *ucore, void *context) -{ - if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base))) - return -EFAULT; - - return 0; -} - -ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_create_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_cq cmd; struct ib_uverbs_ex_create_cq cmd_ex; - struct ib_uverbs_create_cq_resp resp; - struct ib_udata ucore; - struct ib_udata uhw; struct ib_ucq_object *obj; + int ret; - if (out_len < sizeof(resp)) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; - - 
ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response), - sizeof(cmd), sizeof(resp)); - - ib_uverbs_init_udata(&uhw, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; memset(&cmd_ex, 0, sizeof(cmd_ex)); cmd_ex.user_handle = cmd.user_handle; @@ -1105,43 +1056,19 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, cmd_ex.comp_vector = cmd.comp_vector; cmd_ex.comp_channel = cmd.comp_channel; - obj = create_cq(file, &ucore, &uhw, &cmd_ex, - offsetof(typeof(cmd_ex), comp_channel) + - sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb, - NULL); - - if (IS_ERR(obj)) - return PTR_ERR(obj); - - return in_len; -} - -static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file, - struct ib_ucq_object *obj, - struct ib_uverbs_ex_create_cq_resp *resp, - struct ib_udata *ucore, void *context) -{ - if (ib_copy_to_udata(ucore, resp, resp->response_length)) - return -EFAULT; - - return 0; + obj = create_cq(attrs, &cmd_ex); + return PTR_ERR_OR_ZERO(obj); } -int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_create_cq(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_create_cq_resp resp; struct ib_uverbs_ex_create_cq cmd; struct ib_ucq_object *obj; - int err; - - if (ucore->inlen < sizeof(cmd)) - return -EINVAL; + int ret; - err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); - if (err) - return err; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; if (cmd.comp_mask) return -EINVAL; @@ -1149,52 +1076,36 @@ int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file, if (cmd.reserved) return -EINVAL; - if (ucore->outlen < (offsetof(typeof(resp), response_length) + - sizeof(resp.response_length))) - return -ENOSPC; - - obj = create_cq(file, ucore, uhw, &cmd, - min(ucore->inlen, sizeof(cmd)), - ib_uverbs_ex_create_cq_cb, NULL); - + obj = create_cq(attrs, &cmd); return PTR_ERR_OR_ZERO(obj); } -ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_resize_cq cmd; struct ib_uverbs_resize_cq_resp resp = {}; - struct ib_udata udata; struct ib_cq *cq; int ret = -EINVAL; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file); + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) return -EINVAL; - ret = cq->device->resize_cq(cq, cmd.cqe, &udata); + ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata); if (ret) goto out; resp.cqe = cq->cqe; - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe)) - ret = -EFAULT; - + ret = uverbs_response(attrs, &resp, sizeof(resp)); out: uobj_put_obj_read(cq); - return ret ? 
ret : in_len; + return ret; } static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest, @@ -1227,9 +1138,7 @@ static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest, return 0; } -ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_poll_cq cmd; struct ib_uverbs_poll_cq_resp resp; @@ -1239,15 +1148,16 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, struct ib_wc wc; int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file); + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) return -EINVAL; /* we copy a struct ib_uverbs_poll_cq_resp to user space */ - header_ptr = u64_to_user_ptr(cmd.response); + header_ptr = attrs->ucore.outbuf; data_ptr = header_ptr + sizeof resp; memset(&resp, 0, sizeof resp); @@ -1271,24 +1181,24 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, goto out_put; } - ret = in_len; + ret = 0; out_put: uobj_put_obj_read(cq); return ret; } -ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_req_notify_cq cmd; struct ib_cq *cq; + int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file); + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) return -EINVAL; @@ -1297,22 +1207,22 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, uobj_put_obj_read(cq); - return in_len; + return 0; } -ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_destroy_cq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_cq cmd; struct ib_uverbs_destroy_cq_resp resp; struct ib_uobject *uobj; struct ib_ucq_object *obj; + int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, file); + uobj = uobj_get_destroy(UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); @@ -1323,21 +1233,11 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, uobj_put_destroy(uobj); - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - return -EFAULT; - - return in_len; + return uverbs_response(attrs, &resp, sizeof(resp)); } -static int create_qp(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw, - struct ib_uverbs_ex_create_qp *cmd, - size_t cmd_sz, - int (*cb)(struct ib_uverbs_file *file, - struct ib_uverbs_ex_create_qp_resp *resp, - struct ib_udata *udata), - void *context) +static int create_qp(struct uverbs_attr_bundle *attrs, + struct ib_uverbs_ex_create_qp *cmd) { struct ib_uqp_object *obj; struct ib_device *device; @@ -1347,7 +1247,6 @@ static int create_qp(struct ib_uverbs_file *file, struct ib_cq *scq = NULL, *rcq = NULL; struct ib_srq *srq = NULL; struct ib_qp *qp; - char *buf; struct ib_qp_init_attr attr = {}; struct ib_uverbs_ex_create_qp_resp resp; int ret; @@ -1358,7 +1257,7 @@ static int 
create_qp(struct ib_uverbs_file *file, if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) return -EPERM; - obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, file, + obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -1366,12 +1265,10 @@ static int create_qp(struct ib_uverbs_file *file, obj->uevent.uobject.user_handle = cmd->user_handle; mutex_init(&obj->mcast_lock); - if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) + - sizeof(cmd->rwq_ind_tbl_handle) && - (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) { + if (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE) { ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL, - cmd->rwq_ind_tbl_handle, file); + cmd->rwq_ind_tbl_handle, attrs); if (!ind_tbl) { ret = -EINVAL; goto err_put; @@ -1380,13 +1277,6 @@ static int create_qp(struct ib_uverbs_file *file, attr.rwq_ind_tbl = ind_tbl; } - if (cmd_sz > sizeof(*cmd) && - !ib_is_udata_cleared(ucore, sizeof(*cmd), - cmd_sz - sizeof(*cmd))) { - ret = -EOPNOTSUPP; - goto err_put; - } - if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) { ret = -EINVAL; goto err_put; @@ -1397,7 +1287,7 @@ static int create_qp(struct ib_uverbs_file *file, if (cmd->qp_type == IB_QPT_XRC_TGT) { xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->pd_handle, - file); + attrs); if (IS_ERR(xrcd_uobj)) { ret = -EINVAL; @@ -1417,7 +1307,7 @@ static int create_qp(struct ib_uverbs_file *file, } else { if (cmd->is_srq) { srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, - cmd->srq_handle, file); + cmd->srq_handle, attrs); if (!srq || srq->srq_type == IB_SRQT_XRC) { ret = -EINVAL; goto err_put; @@ -1428,7 +1318,7 @@ static int create_qp(struct ib_uverbs_file *file, if (cmd->recv_cq_handle != cmd->send_cq_handle) { rcq = uobj_get_obj_read( cq, UVERBS_OBJECT_CQ, - cmd->recv_cq_handle, file); + cmd->recv_cq_handle, attrs); if (!rcq) { ret = -EINVAL; goto err_put; @@ -1439,11 +1329,11 @@ static int create_qp(struct ib_uverbs_file *file, if (has_sq) scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, - cmd->send_cq_handle, file); + cmd->send_cq_handle, attrs); if (!ind_tbl) rcq = rcq ?: scq; pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, - file); + attrs); if (!pd || (!scq && has_sq)) { ret = -EINVAL; goto err_put; @@ -1453,7 +1343,7 @@ static int create_qp(struct ib_uverbs_file *file, } attr.event_handler = ib_uverbs_qp_event_handler; - attr.qp_context = file; + attr.qp_context = attrs->ufile; attr.send_cq = scq; attr.recv_cq = rcq; attr.srq = srq; @@ -1473,10 +1363,7 @@ static int create_qp(struct ib_uverbs_file *file, INIT_LIST_HEAD(&obj->uevent.event_list); INIT_LIST_HEAD(&obj->mcast_list); - if (cmd_sz >= offsetof(typeof(*cmd), create_flags) + - sizeof(cmd->create_flags)) - attr.create_flags = cmd->create_flags; - + attr.create_flags = cmd->create_flags; if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | IB_QP_CREATE_CROSS_CHANNEL | IB_QP_CREATE_MANAGED_SEND | @@ -1498,18 +1385,10 @@ static int create_qp(struct ib_uverbs_file *file, attr.source_qpn = cmd->source_qpn; } - buf = (void *)cmd + sizeof(*cmd); - if (cmd_sz > sizeof(*cmd)) - if (!(buf[0] == 0 && !memcmp(buf, buf + 1, - cmd_sz - sizeof(*cmd) - 1))) { - ret = -EINVAL; - goto err_put; - } - if (cmd->qp_type == IB_QPT_XRC_TGT) qp = ib_create_qp(pd, &attr); else - qp = _ib_create_qp(device, pd, &attr, uhw, + qp = _ib_create_qp(device, pd, &attr, &attrs->driver_udata, &obj->uevent.uobject); if (IS_ERR(qp)) { @@ -1557,11 
+1436,9 @@ static int create_qp(struct ib_uverbs_file *file, resp.base.max_recv_wr = attr.cap.max_recv_wr; resp.base.max_send_wr = attr.cap.max_send_wr; resp.base.max_inline_data = attr.cap.max_inline_data; + resp.response_length = uverbs_response_length(attrs, sizeof(resp)); - resp.response_length = offsetof(typeof(resp), response_length) + - sizeof(resp.response_length); - - ret = cb(file, &resp, ucore); + ret = uverbs_response(attrs, &resp, sizeof(resp)); if (ret) goto err_cb; @@ -1583,7 +1460,7 @@ static int create_qp(struct ib_uverbs_file *file, if (ind_tbl) uobj_put_obj_read(ind_tbl); - return uobj_alloc_commit(&obj->uevent.uobject, 0); + return uobj_alloc_commit(&obj->uevent.uobject); err_cb: ib_destroy_qp(qp); @@ -1605,39 +1482,15 @@ err_put: return ret; } -static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file, - struct ib_uverbs_ex_create_qp_resp *resp, - struct ib_udata *ucore) -{ - if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base))) - return -EFAULT; - - return 0; -} - -ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_create_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_qp cmd; struct ib_uverbs_ex_create_qp cmd_ex; - struct ib_udata ucore; - struct ib_udata uhw; - ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp); - int err; - - if (out_len < resp_size) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof(cmd))) - return -EFAULT; + int ret; - ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response), - sizeof(cmd), resp_size); - ib_uverbs_init_udata(&uhw, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + resp_size, - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - resp_size); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; memset(&cmd_ex, 0, sizeof(cmd_ex)); cmd_ex.user_handle = cmd.user_handle; @@ -1654,42 +1507,17 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, cmd_ex.qp_type = cmd.qp_type; cmd_ex.is_srq = cmd.is_srq; - err = create_qp(file, &ucore, &uhw, &cmd_ex, - offsetof(typeof(cmd_ex), is_srq) + - sizeof(cmd.is_srq), ib_uverbs_create_qp_cb, - NULL); - - if (err) - return err; - - return in_len; + return create_qp(attrs, &cmd_ex); } -static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file, - struct ib_uverbs_ex_create_qp_resp *resp, - struct ib_udata *ucore) +static int ib_uverbs_ex_create_qp(struct uverbs_attr_bundle *attrs) { - if (ib_copy_to_udata(ucore, resp, resp->response_length)) - return -EFAULT; - - return 0; -} - -int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) -{ - struct ib_uverbs_ex_create_qp_resp resp; - struct ib_uverbs_ex_create_qp cmd = {0}; - int err; - - if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) + - sizeof(cmd.comp_mask))) - return -EINVAL; + struct ib_uverbs_ex_create_qp cmd; + int ret; - err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); - if (err) - return err; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK) return -EINVAL; @@ -1697,26 +1525,13 @@ int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file, if (cmd.reserved) return -EINVAL; - if (ucore->outlen < (offsetof(typeof(resp), response_length) + - sizeof(resp.response_length))) - return -ENOSPC; - - err = create_qp(file, ucore, uhw, &cmd, - min(ucore->inlen, sizeof(cmd)), - ib_uverbs_ex_create_qp_cb, NULL); - - if (err) 
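
resp.response_length, filled in via uverbs_response_length() above, is what lets the extended verbs grow compatibly: the provider library checks how far the kernel actually wrote before trusting a newer member. A sketch of that user-side check against a hypothetical extended response layout (the real uapi structs have no 'newer_field'):

	#include <stddef.h>
	#include <linux/types.h>

	/* hypothetical extended response; newer_field was appended in a later ABI rev */
	struct example_ex_resp {
		__u32 base_field;
		__u32 response_length;
		__u64 newer_field;
	};

	static int example_has_newer_field(const struct example_ex_resp *resp)
	{
		/* the kernel reports how many bytes of the response it filled in */
		return resp->response_length >=
		       offsetof(struct example_ex_resp, newer_field) +
			       sizeof(resp->newer_field);
	}
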
- return err; - - return 0; + return create_qp(attrs, &cmd); } -ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, - const char __user *buf, int in_len, int out_len) +static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_open_qp cmd; struct ib_uverbs_create_qp_resp resp; - struct ib_udata udata; struct ib_uqp_object *obj; struct ib_xrcd *xrcd; struct ib_uobject *uninitialized_var(xrcd_uobj); @@ -1725,23 +1540,16 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, int ret; struct ib_device *ib_dev; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, file, + obj = (struct ib_uqp_object *)uobj_alloc(UVERBS_OBJECT_QP, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); - xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, file); + xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd.pd_handle, attrs); if (IS_ERR(xrcd_uobj)) { ret = -EINVAL; goto err_put; @@ -1754,7 +1562,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, } attr.event_handler = ib_uverbs_qp_event_handler; - attr.qp_context = file; + attr.qp_context = attrs->ufile; attr.qp_num = cmd.qpn; attr.qp_type = cmd.qp_type; @@ -1775,17 +1583,16 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, resp.qpn = qp->qp_num; resp.qp_handle = obj->uevent.uobject.id; - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) goto err_destroy; - } obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); atomic_inc(&obj->uxrcd->refcnt); qp->uobject = &obj->uevent.uobject; uobj_put_read(xrcd_uobj); - return uobj_alloc_commit(&obj->uevent.uobject, in_len); + return uobj_alloc_commit(&obj->uevent.uobject); err_destroy: ib_destroy_qp(qp); @@ -1818,9 +1625,7 @@ static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr, uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr); } -ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_query_qp cmd; struct ib_uverbs_query_qp_resp resp; @@ -1829,8 +1634,9 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, struct ib_qp_init_attr *init_attr; int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; attr = kmalloc(sizeof *attr, GFP_KERNEL); init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL); @@ -1839,7 +1645,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, goto out; } - qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file); + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) { ret = -EINVAL; goto out; @@ -1886,14 +1692,13 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, resp.max_inline_data = init_attr->cap.max_inline_data; resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR; - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); out: kfree(attr); kfree(init_attr); - 
return ret ? ret : in_len; + return ret; } /* Remove ignored fields set in the attribute mask */ @@ -1933,8 +1738,8 @@ static void copy_ah_attr_from_uverbs(struct ib_device *dev, rdma_ah_set_make_grd(rdma_attr, false); } -static int modify_qp(struct ib_uverbs_file *file, - struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata) +static int modify_qp(struct uverbs_attr_bundle *attrs, + struct ib_uverbs_ex_modify_qp *cmd) { struct ib_qp_attr *attr; struct ib_qp *qp; @@ -1944,7 +1749,8 @@ static int modify_qp(struct ib_uverbs_file *file, if (!attr) return -ENOMEM; - qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, file); + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, + attrs); if (!qp) { ret = -EINVAL; goto out; @@ -2081,7 +1887,7 @@ static int modify_qp(struct ib_uverbs_file *file, ret = ib_modify_qp_with_udata(qp, attr, modify_qp_mask(qp->qp_type, cmd->base.attr_mask), - udata); + &attrs->driver_udata); release_qp: uobj_put_obj_read(qp); @@ -2091,80 +1897,64 @@ out: return ret; } -ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_modify_qp(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_modify_qp cmd = {}; - struct ib_udata udata; + struct ib_uverbs_ex_modify_qp cmd; int ret; - if (copy_from_user(&cmd.base, buf, sizeof(cmd.base))) - return -EFAULT; + ret = uverbs_request(attrs, &cmd.base, sizeof(cmd.base)); + if (ret) + return ret; if (cmd.base.attr_mask & ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1)) return -EOPNOTSUPP; - ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL, - in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr), - out_len); - - ret = modify_qp(file, &cmd, &udata); - if (ret) - return ret; - - return in_len; + return modify_qp(attrs, &cmd); } -int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_modify_qp(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_modify_qp cmd = {}; + struct ib_uverbs_ex_modify_qp cmd; + struct ib_uverbs_ex_modify_qp_resp resp = { + .response_length = uverbs_response_length(attrs, sizeof(resp)) + }; int ret; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; + /* * Last bit is reserved for extending the attr_mask by * using another field. 
*/ BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31)); - if (ucore->inlen < sizeof(cmd.base)) - return -EINVAL; - - ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); - if (ret) - return ret; - if (cmd.base.attr_mask & ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1)) return -EOPNOTSUPP; - if (ucore->inlen > sizeof(cmd)) { - if (!ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) - return -EOPNOTSUPP; - } - - ret = modify_qp(file, &cmd, uhw); + ret = modify_qp(attrs, &cmd); + if (ret) + return ret; - return ret; + return uverbs_response(attrs, &resp, sizeof(resp)); } -ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_destroy_qp(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_qp cmd; struct ib_uverbs_destroy_qp_resp resp; struct ib_uobject *uobj; struct ib_uqp_object *obj; + int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, file); + uobj = uobj_get_destroy(UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); @@ -2174,10 +1964,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, uobj_put_destroy(uobj); - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - return -EFAULT; - - return in_len; + return uverbs_response(attrs, &resp, sizeof(resp)); } static void *alloc_wr(size_t wr_size, __u32 num_sge) @@ -2190,9 +1977,7 @@ static void *alloc_wr(size_t wr_size, __u32 num_sge) num_sge * sizeof (struct ib_sge), GFP_KERNEL); } -ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_post_send cmd; struct ib_uverbs_post_send_resp resp; @@ -2202,24 +1987,31 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, struct ib_qp *qp; int i, sg_ind; int is_ud; - ssize_t ret = -EINVAL; + int ret, ret2; size_t next_size; + const struct ib_sge __user *sgls; + const void __user *wqes; + struct uverbs_req_iter iter; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count + - cmd.sge_count * sizeof (struct ib_uverbs_sge)) - return -EINVAL; - - if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr)) - return -EINVAL; + ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); + if (ret) + return ret; + wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count); + if (IS_ERR(wqes)) + return PTR_ERR(wqes); + sgls = uverbs_request_next_ptr( + &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge)); + if (IS_ERR(sgls)) + return PTR_ERR(sgls); + ret = uverbs_request_finish(&iter); + if (ret) + return ret; user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL); if (!user_wr) return -ENOMEM; - qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file); + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) goto out; @@ -2227,8 +2019,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, sg_ind = 0; last = NULL; for (i = 0; i < cmd.wr_count; ++i) { - if (copy_from_user(user_wr, - buf + sizeof cmd + i * cmd.wqe_size, + if (copy_from_user(user_wr, wqes + i * cmd.wqe_size, cmd.wqe_size)) { ret = -EFAULT; goto out_put; @@ -2256,7 +2047,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, } ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, - 
user_wr->wr.ud.ah, file); + user_wr->wr.ud.ah, attrs); if (!ud->ah) { kfree(ud); ret = -EINVAL; @@ -2336,11 +2127,9 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, if (next->num_sge) { next->sg_list = (void *) next + ALIGN(next_size, sizeof(struct ib_sge)); - if (copy_from_user(next->sg_list, - buf + sizeof cmd + - cmd.wr_count * cmd.wqe_size + - sg_ind * sizeof (struct ib_sge), - next->num_sge * sizeof (struct ib_sge))) { + if (copy_from_user(next->sg_list, sgls + sg_ind, + next->num_sge * + sizeof(struct ib_sge))) { ret = -EFAULT; goto out_put; } @@ -2350,7 +2139,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, } resp.bad_wr = 0; - ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); + ret = qp->device->ops.post_send(qp->real_qp, wr, &bad_wr); if (ret) for (next = wr; next; next = next->next) { ++resp.bad_wr; @@ -2358,8 +2147,9 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, break; } - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - ret = -EFAULT; + ret2 = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret2) + ret = ret2; out_put: uobj_put_obj_read(qp); @@ -2375,28 +2165,35 @@ out_put: out: kfree(user_wr); - return ret ? ret : in_len; + return ret; } -static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, - int in_len, - u32 wr_count, - u32 sge_count, - u32 wqe_size) +static struct ib_recv_wr * +ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count, + u32 wqe_size, u32 sge_count) { struct ib_uverbs_recv_wr *user_wr; struct ib_recv_wr *wr = NULL, *last, *next; int sg_ind; int i; int ret; - - if (in_len < wqe_size * wr_count + - sge_count * sizeof (struct ib_uverbs_sge)) - return ERR_PTR(-EINVAL); + const struct ib_sge __user *sgls; + const void __user *wqes; if (wqe_size < sizeof (struct ib_uverbs_recv_wr)) return ERR_PTR(-EINVAL); + wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count); + if (IS_ERR(wqes)) + return ERR_CAST(wqes); + sgls = uverbs_request_next_ptr( + iter, sge_count * sizeof(struct ib_uverbs_sge)); + if (IS_ERR(sgls)) + return ERR_CAST(sgls); + ret = uverbs_request_finish(iter); + if (ret) + return ERR_PTR(ret); + user_wr = kmalloc(wqe_size, GFP_KERNEL); if (!user_wr) return ERR_PTR(-ENOMEM); @@ -2404,7 +2201,7 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, sg_ind = 0; last = NULL; for (i = 0; i < wr_count; ++i) { - if (copy_from_user(user_wr, buf + i * wqe_size, + if (copy_from_user(user_wr, wqes + i * wqe_size, wqe_size)) { ret = -EFAULT; goto err; @@ -2443,10 +2240,9 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, if (next->num_sge) { next->sg_list = (void *) next + ALIGN(sizeof *next, sizeof (struct ib_sge)); - if (copy_from_user(next->sg_list, - buf + wr_count * wqe_size + - sg_ind * sizeof (struct ib_sge), - next->num_sge * sizeof (struct ib_sge))) { + if (copy_from_user(next->sg_list, sgls + sg_ind, + next->num_sge * + sizeof(struct ib_sge))) { ret = -EFAULT; goto err; } @@ -2470,32 +2266,33 @@ err: return ERR_PTR(ret); } -ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_post_recv cmd; struct ib_uverbs_post_recv_resp resp; struct ib_recv_wr *wr, *next; const struct ib_recv_wr *bad_wr; struct ib_qp *qp; - ssize_t ret = -EINVAL; + int ret, ret2; + struct uverbs_req_iter iter; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = 
uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); + if (ret) + return ret; - wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, - in_len - sizeof cmd, cmd.wr_count, - cmd.sge_count, cmd.wqe_size); + wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size, + cmd.sge_count); if (IS_ERR(wr)) return PTR_ERR(wr); - qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file); - if (!qp) + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); + if (!qp) { + ret = -EINVAL; goto out; + } resp.bad_wr = 0; - ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); + ret = qp->device->ops.post_recv(qp->real_qp, wr, &bad_wr); uobj_put_obj_read(qp); if (ret) { @@ -2506,9 +2303,9 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, } } - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - ret = -EFAULT; - + ret2 = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret2) + ret = ret2; out: while (wr) { next = wr->next; @@ -2516,36 +2313,36 @@ out: wr = next; } - return ret ? ret : in_len; + return ret; } -ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_post_srq_recv cmd; struct ib_uverbs_post_srq_recv_resp resp; struct ib_recv_wr *wr, *next; const struct ib_recv_wr *bad_wr; struct ib_srq *srq; - ssize_t ret = -EINVAL; + int ret, ret2; + struct uverbs_req_iter iter; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); + if (ret) + return ret; - wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd, - in_len - sizeof cmd, cmd.wr_count, - cmd.sge_count, cmd.wqe_size); + wr = ib_uverbs_unmarshall_recv(&iter, cmd.wr_count, cmd.wqe_size, + cmd.sge_count); if (IS_ERR(wr)) return PTR_ERR(wr); - srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file); - if (!srq) + srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); + if (!srq) { + ret = -EINVAL; goto out; + } resp.bad_wr = 0; - ret = srq->device->post_srq_recv ? - srq->device->post_srq_recv(srq, wr, &bad_wr) : -EOPNOTSUPP; + ret = srq->device->ops.post_srq_recv(srq, wr, &bad_wr); uobj_put_obj_read(srq); @@ -2556,8 +2353,9 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file, break; } - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - ret = -EFAULT; + ret2 = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret2) + ret = ret2; out: while (wr) { @@ -2566,12 +2364,10 @@ out: wr = next; } - return ret ? 
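post_send(), post_recv() and post_srq_recv() above stop doing manual offset arithmetic on the raw write() buffer and instead walk it with a request iterator: the fixed header first, then each variable-length trailer in order, then a finish step before any of the user data is consumed. A hedged sketch of that flow, mirroring the calls shown in these hunks (example_cmd is a hypothetical header carrying wr_count/wqe_size/sge_count):

/*
 * Sketch of the uverbs_req_iter pattern used by the post_* handlers above.
 * example_cmd is a placeholder, not a real uAPI structure.
 */
static int example_post(struct uverbs_attr_bundle *attrs)
{
	struct example_cmd cmd;
	struct uverbs_req_iter iter;
	const void __user *wqes;
	const struct ib_sge __user *sgls;
	int ret;

	ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
	if (ret)
		return ret;

	/* Claim each trailing array in order; the iterator bounds-checks it. */
	wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
	if (IS_ERR(wqes))
		return PTR_ERR(wqes);
	sgls = uverbs_request_next_ptr(&iter,
				       cmd.sge_count * sizeof(struct ib_sge));
	if (IS_ERR(sgls))
		return PTR_ERR(sgls);

	/* Complete the parse; uverbs_request_finish() validates what remains. */
	ret = uverbs_request_finish(&iter);
	if (ret)
		return ret;

	/* ... copy_from_user() from wqes/sgls exactly as the handlers above do ... */
	return 0;
}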
ret : in_len; + return ret; } -ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_ah cmd; struct ib_uverbs_create_ah_resp resp; @@ -2580,21 +2376,13 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, struct ib_ah *ah; struct rdma_ah_attr attr = {}; int ret; - struct ib_udata udata; struct ib_device *ib_dev; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - uobj = uobj_alloc(UVERBS_OBJECT_AH, file, &ib_dev); + uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev); if (IS_ERR(uobj)) return PTR_ERR(uobj); @@ -2603,7 +2391,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, goto err; } - pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file); + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!pd) { ret = -EINVAL; goto err; @@ -2627,7 +2415,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, rdma_ah_set_ah_flags(&attr, 0); } - ah = rdma_create_user_ah(pd, &attr, &udata); + ah = rdma_create_user_ah(pd, &attr, &attrs->driver_udata); if (IS_ERR(ah)) { ret = PTR_ERR(ah); goto err_put; @@ -2639,16 +2427,15 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, resp.ah_handle = uobj->id; - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) { - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) goto err_copy; - } uobj_put_obj_read(pd); - return uobj_alloc_commit(uobj, in_len); + return uobj_alloc_commit(uobj); err_copy: - rdma_destroy_ah(ah); + rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE); err_put: uobj_put_obj_read(pd); @@ -2658,21 +2445,19 @@ err: return ret; } -ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file, - const char __user *buf, int in_len, int out_len) +static int ib_uverbs_destroy_ah(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_ah cmd; + int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, file, - in_len); + return uobj_perform_destroy(UVERBS_OBJECT_AH, cmd.ah_handle, attrs); } -ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_attach_mcast cmd; struct ib_qp *qp; @@ -2680,10 +2465,11 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, struct ib_uverbs_mcast_entry *mcast; int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file); + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) return -EINVAL; @@ -2716,12 +2502,10 @@ out_put: mutex_unlock(&obj->mcast_lock); uobj_put_obj_read(qp); - return ret ? 
ret : in_len; + return ret; } -ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_detach_mcast cmd; struct ib_uqp_object *obj; @@ -2730,10 +2514,11 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, int ret = -EINVAL; bool found = false; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file); + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) return -EINVAL; @@ -2759,7 +2544,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, out_put: mutex_unlock(&obj->mcast_lock); uobj_put_obj_read(qp); - return ret ? ret : in_len; + return ret; } struct ib_uflow_resources *flow_resources_alloc(size_t num_specs) @@ -2838,7 +2623,7 @@ void flow_resources_add(struct ib_uflow_resources *uflow_res, } EXPORT_SYMBOL(flow_resources_add); -static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile, +static int kern_spec_to_ib_spec_action(const struct uverbs_attr_bundle *attrs, struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec, struct ib_uflow_resources *uflow_res) @@ -2867,7 +2652,7 @@ static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile, ib_spec->action.act = uobj_get_obj_read(flow_action, UVERBS_OBJECT_FLOW_ACTION, kern_spec->action.handle, - ufile); + attrs); if (!ib_spec->action.act) return -EINVAL; ib_spec->action.size = @@ -2885,7 +2670,7 @@ static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile, uobj_get_obj_read(counters, UVERBS_OBJECT_COUNTERS, kern_spec->flow_count.handle, - ufile); + attrs); if (!ib_spec->flow_count.counters) return -EINVAL; ib_spec->flow_count.size = @@ -3066,7 +2851,7 @@ static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec, kern_filter_sz, ib_spec); } -static int kern_spec_to_ib_spec(struct ib_uverbs_file *ufile, +static int kern_spec_to_ib_spec(struct uverbs_attr_bundle *attrs, struct ib_uverbs_flow_spec *kern_spec, union ib_flow_spec *ib_spec, struct ib_uflow_resources *uflow_res) @@ -3075,17 +2860,15 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_file *ufile, return -EINVAL; if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG) - return kern_spec_to_ib_spec_action(ufile, kern_spec, ib_spec, + return kern_spec_to_ib_spec_action(attrs, kern_spec, ib_spec, uflow_res); else return kern_spec_to_ib_spec_filter(kern_spec, ib_spec); } -int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_create_wq cmd = {}; + struct ib_uverbs_ex_create_wq cmd; struct ib_uverbs_ex_create_wq_resp resp = {}; struct ib_uwq_object *obj; int err = 0; @@ -3093,43 +2876,27 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, struct ib_pd *pd; struct ib_wq *wq; struct ib_wq_init_attr wq_init_attr = {}; - size_t required_cmd_sz; - size_t required_resp_len; struct ib_device *ib_dev; - required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge); - required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn); - - if (ucore->inlen < required_cmd_sz) - return -EINVAL; - - if (ucore->outlen < required_resp_len) - return -ENOSPC; - - if (ucore->inlen > sizeof(cmd) && - !ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - 
sizeof(cmd))) - return -EOPNOTSUPP; - - err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); + err = uverbs_request(attrs, &cmd, sizeof(cmd)); if (err) return err; if (cmd.comp_mask) return -EOPNOTSUPP; - obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, file, + obj = (struct ib_uwq_object *)uobj_alloc(UVERBS_OBJECT_WQ, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); - pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, file); + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); if (!pd) { err = -EINVAL; goto err_uobj; } - cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file); + cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) { err = -EINVAL; goto err_put_pd; @@ -3138,20 +2905,14 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, wq_init_attr.cq = cq; wq_init_attr.max_sge = cmd.max_sge; wq_init_attr.max_wr = cmd.max_wr; - wq_init_attr.wq_context = file; + wq_init_attr.wq_context = attrs->ufile; wq_init_attr.wq_type = cmd.wq_type; wq_init_attr.event_handler = ib_uverbs_wq_event_handler; - if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) + - sizeof(cmd.create_flags))) - wq_init_attr.create_flags = cmd.create_flags; + wq_init_attr.create_flags = cmd.create_flags; obj->uevent.events_reported = 0; INIT_LIST_HEAD(&obj->uevent.event_list); - if (!pd->device->create_wq) { - err = -EOPNOTSUPP; - goto err_put_cq; - } - wq = pd->device->create_wq(pd, &wq_init_attr, uhw); + wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata); if (IS_ERR(wq)) { err = PTR_ERR(wq); goto err_put_cq; @@ -3175,15 +2936,14 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file, resp.max_sge = wq_init_attr.max_sge; resp.max_wr = wq_init_attr.max_wr; resp.wqn = wq->wq_num; - resp.response_length = required_resp_len; - err = ib_copy_to_udata(ucore, - &resp, resp.response_length); + resp.response_length = uverbs_response_length(attrs, sizeof(resp)); + err = uverbs_response(attrs, &resp, sizeof(resp)); if (err) goto err_copy; uobj_put_obj_read(pd); uobj_put_obj_read(cq); - return uobj_alloc_commit(&obj->uevent.uobject, 0); + return uobj_alloc_commit(&obj->uevent.uobject); err_copy: ib_destroy_wq(wq); @@ -3197,41 +2957,23 @@ err_uobj: return err; } -int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_destroy_wq(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_destroy_wq cmd = {}; + struct ib_uverbs_ex_destroy_wq cmd; struct ib_uverbs_ex_destroy_wq_resp resp = {}; struct ib_uobject *uobj; struct ib_uwq_object *obj; - size_t required_cmd_sz; - size_t required_resp_len; int ret; - required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle); - required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); - - if (ucore->inlen < required_cmd_sz) - return -EINVAL; - - if (ucore->outlen < required_resp_len) - return -ENOSPC; - - if (ucore->inlen > sizeof(cmd) && - !ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) - return -EOPNOTSUPP; - - ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (cmd.comp_mask) return -EOPNOTSUPP; - resp.response_length = required_resp_len; - uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, file); + resp.response_length = uverbs_response_length(attrs, sizeof(resp)); + uobj = uobj_get_destroy(UVERBS_OBJECT_WQ, cmd.wq_handle, attrs); if 
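create_wq above also shows the second theme of this series: verb calls now go through device->ops.* instead of function pointers scattered across ib_device, and the per-handler "is this op implemented?" checks disappear because the declarative write table at the end of this file guards each method with UAPI_DEF_METHOD_NEEDS_FN(). On the driver side the expectation is a single static const ops structure; a hedged sketch of that counterpart (the driver name and callback body are made up, and ib_set_device_ops() is assumed here to be the registration helper that pairs with struct ib_device_ops in this series):

/*
 * Hypothetical driver-side counterpart to the ->ops conversion in these
 * hunks. Only the ib_device_ops / ib_set_device_ops names are taken from
 * the series; everything prefixed foo_ is invented for illustration.
 */
static struct ib_wq *foo_create_wq(struct ib_pd *pd,
				   struct ib_wq_init_attr *attr,
				   struct ib_udata *udata)
{
	return ERR_PTR(-EOPNOTSUPP);	/* placeholder implementation */
}

static const struct ib_device_ops foo_dev_ops = {
	.create_wq = foo_create_wq,
	/* ... every verb the device actually supports ... */
};

static void foo_register_ops(struct ib_device *ibdev)
{
	/* Core code then reaches the driver via ibdev->ops.create_wq(...). */
	ib_set_device_ops(ibdev, &foo_dev_ops);
}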
(IS_ERR(uobj)) return PTR_ERR(uobj); @@ -3240,29 +2982,17 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, uobj_put_destroy(uobj); - return ib_copy_to_udata(ucore, &resp, resp.response_length); + return uverbs_response(attrs, &resp, sizeof(resp)); } -int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_modify_wq cmd = {}; + struct ib_uverbs_ex_modify_wq cmd; struct ib_wq *wq; struct ib_wq_attr wq_attr = {}; - size_t required_cmd_sz; int ret; - required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state); - if (ucore->inlen < required_cmd_sz) - return -EINVAL; - - if (ucore->inlen > sizeof(cmd) && - !ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) - return -EOPNOTSUPP; - - ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; @@ -3272,7 +3002,7 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS)) return -EINVAL; - wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, file); + wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs); if (!wq) return -EINVAL; @@ -3282,24 +3012,18 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file, wq_attr.flags = cmd.flags; wq_attr.flags_mask = cmd.flags_mask; } - if (!wq->device->modify_wq) { - ret = -EOPNOTSUPP; - goto out; - } - ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw); -out: + ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask, + &attrs->driver_udata); uobj_put_obj_read(wq); return ret; } -int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_create_rwq_ind_table cmd = {}; + struct ib_uverbs_ex_create_rwq_ind_table cmd; struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {}; struct ib_uobject *uobj; - int err = 0; + int err; struct ib_rwq_ind_table_init_attr init_attr = {}; struct ib_rwq_ind_table *rwq_ind_tbl; struct ib_wq **wqs = NULL; @@ -3307,27 +3031,13 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, struct ib_wq *wq = NULL; int i, j, num_read_wqs; u32 num_wq_handles; - u32 expected_in_size; - size_t required_cmd_sz_header; - size_t required_resp_len; + struct uverbs_req_iter iter; struct ib_device *ib_dev; - required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size); - required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num); - - if (ucore->inlen < required_cmd_sz_header) - return -EINVAL; - - if (ucore->outlen < required_resp_len) - return -ENOSPC; - - err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header); + err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); if (err) return err; - ucore->inbuf += required_cmd_sz_header; - ucore->inlen -= required_cmd_sz_header; - if (cmd.comp_mask) return -EOPNOTSUPP; @@ -3335,26 +3045,17 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, return -EINVAL; num_wq_handles = 1 << cmd.log_ind_tbl_size; - expected_in_size = num_wq_handles * sizeof(__u32); - if (num_wq_handles == 1) - /* input size for wq handles is u64 aligned */ - expected_in_size += sizeof(__u32); - - if (ucore->inlen < expected_in_size) - return -EINVAL; - - 
if (ucore->inlen > expected_in_size && - !ib_is_udata_cleared(ucore, expected_in_size, - ucore->inlen - expected_in_size)) - return -EOPNOTSUPP; - wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles), GFP_KERNEL); if (!wqs_handles) return -ENOMEM; - err = ib_copy_from_udata(wqs_handles, ucore, - num_wq_handles * sizeof(__u32)); + err = uverbs_request_next(&iter, wqs_handles, + num_wq_handles * sizeof(__u32)); + if (err) + goto err_free; + + err = uverbs_request_finish(&iter); if (err) goto err_free; @@ -3367,7 +3068,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, for (num_read_wqs = 0; num_read_wqs < num_wq_handles; num_read_wqs++) { wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, - wqs_handles[num_read_wqs], file); + wqs_handles[num_read_wqs], attrs); if (!wq) { err = -EINVAL; goto put_wqs; @@ -3376,7 +3077,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, wqs[num_read_wqs] = wq; } - uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, file, &ib_dev); + uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev); if (IS_ERR(uobj)) { err = PTR_ERR(uobj); goto put_wqs; @@ -3385,11 +3086,8 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size; init_attr.ind_tbl = wqs; - if (!ib_dev->create_rwq_ind_table) { - err = -EOPNOTSUPP; - goto err_uobj; - } - rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw); + rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr, + &attrs->driver_udata); if (IS_ERR(rwq_ind_tbl)) { err = PTR_ERR(rwq_ind_tbl); @@ -3408,10 +3106,9 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, resp.ind_tbl_handle = uobj->id; resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num; - resp.response_length = required_resp_len; + resp.response_length = uverbs_response_length(attrs, sizeof(resp)); - err = ib_copy_to_udata(ucore, - &resp, resp.response_length); + err = uverbs_response(attrs, &resp, sizeof(resp)); if (err) goto err_copy; @@ -3420,7 +3117,7 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file, for (j = 0; j < num_read_wqs; j++) uobj_put_obj_read(wqs[j]); - return uobj_alloc_commit(uobj, 0); + return uobj_alloc_commit(uobj); err_copy: ib_destroy_rwq_ind_table(rwq_ind_tbl); @@ -3435,25 +3132,12 @@ err_free: return err; } -int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_destroy_rwq_ind_table(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {}; - int ret; - size_t required_cmd_sz; - - required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle); - - if (ucore->inlen < required_cmd_sz) - return -EINVAL; - - if (ucore->inlen > sizeof(cmd) && - !ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) - return -EOPNOTSUPP; + struct ib_uverbs_ex_destroy_rwq_ind_table cmd; + int ret; - ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; @@ -3461,12 +3145,10 @@ int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file, return -EOPNOTSUPP; return uobj_perform_destroy(UVERBS_OBJECT_RWQ_IND_TBL, - cmd.ind_tbl_handle, file, 0); + cmd.ind_tbl_handle, attrs); } -int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs) { struct 
ib_uverbs_create_flow cmd; struct ib_uverbs_create_flow_resp resp; @@ -3477,24 +3159,16 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, struct ib_qp *qp; struct ib_uflow_resources *uflow_res; struct ib_uverbs_flow_spec_hdr *kern_spec; - int err = 0; + struct uverbs_req_iter iter; + int err; void *ib_spec; int i; struct ib_device *ib_dev; - if (ucore->inlen < sizeof(cmd)) - return -EINVAL; - - if (ucore->outlen < sizeof(resp)) - return -ENOSPC; - - err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + err = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); if (err) return err; - ucore->inbuf += sizeof(cmd); - ucore->inlen -= sizeof(cmd); - if (cmd.comp_mask) return -EINVAL; @@ -3512,8 +3186,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS) return -EINVAL; - if (cmd.flow_attr.size > ucore->inlen || - cmd.flow_attr.size > + if (cmd.flow_attr.size > (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec))) return -EINVAL; @@ -3528,21 +3201,25 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, return -ENOMEM; *kern_flow_attr = cmd.flow_attr; - err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore, - cmd.flow_attr.size); + err = uverbs_request_next(&iter, &kern_flow_attr->flow_specs, + cmd.flow_attr.size); if (err) goto err_free_attr; } else { kern_flow_attr = &cmd.flow_attr; } - uobj = uobj_alloc(UVERBS_OBJECT_FLOW, file, &ib_dev); + err = uverbs_request_finish(&iter); + if (err) + goto err_free_attr; + + uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev); if (IS_ERR(uobj)) { err = PTR_ERR(uobj); goto err_free_attr; } - qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, file); + qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); if (!qp) { err = -EINVAL; goto err_uobj; @@ -3553,11 +3230,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, goto err_put; } - if (!qp->device->create_flow) { - err = -EOPNOTSUPP; - goto err_put; - } - flow_attr = kzalloc(struct_size(flow_attr, flows, cmd.flow_attr.num_of_specs), GFP_KERNEL); if (!flow_attr) { @@ -3584,7 +3256,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, cmd.flow_attr.size >= kern_spec->size; i++) { err = kern_spec_to_ib_spec( - file, (struct ib_uverbs_flow_spec *)kern_spec, + attrs, (struct ib_uverbs_flow_spec *)kern_spec, ib_spec, uflow_res); if (err) goto err_free; @@ -3602,8 +3274,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, goto err_free; } - flow_id = qp->device->create_flow(qp, flow_attr, - IB_FLOW_DOMAIN_USER, uhw); + flow_id = qp->device->ops.create_flow( + qp, flow_attr, IB_FLOW_DOMAIN_USER, &attrs->driver_udata); if (IS_ERR(flow_id)) { err = PTR_ERR(flow_id); @@ -3615,8 +3287,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, memset(&resp, 0, sizeof(resp)); resp.flow_handle = uobj->id; - err = ib_copy_to_udata(ucore, - &resp, sizeof(resp)); + err = uverbs_response(attrs, &resp, sizeof(resp)); if (err) goto err_copy; @@ -3624,9 +3295,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, kfree(flow_attr); if (cmd.flow_attr.num_of_specs) kfree(kern_flow_attr); - return uobj_alloc_commit(uobj, 0); + return uobj_alloc_commit(uobj); err_copy: - if (!qp->device->destroy_flow(flow_id)) + if (!qp->device->ops.destroy_flow(flow_id)) atomic_dec(&qp->usecnt); err_free: ib_uverbs_flow_resources_free(uflow_res); @@ -3642,28 +3313,22 @@ err_free_attr: return err; } -int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file, - struct ib_udata 
*ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_destroy_flow(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_flow cmd; int ret; - if (ucore->inlen < sizeof(cmd)) - return -EINVAL; - - ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; if (cmd.comp_mask) return -EINVAL; - return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, file, - 0); + return uobj_perform_destroy(UVERBS_OBJECT_FLOW, cmd.flow_handle, attrs); } -static int __uverbs_create_xsrq(struct ib_uverbs_file *file, +static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, struct ib_uverbs_create_xsrq *cmd, struct ib_udata *udata) { @@ -3676,7 +3341,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, int ret; struct ib_device *ib_dev; - obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, file, + obj = (struct ib_usrq_object *)uobj_alloc(UVERBS_OBJECT_SRQ, attrs, &ib_dev); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -3686,7 +3351,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, if (cmd->srq_type == IB_SRQT_XRC) { xrcd_uobj = uobj_get_read(UVERBS_OBJECT_XRCD, cmd->xrcd_handle, - file); + attrs); if (IS_ERR(xrcd_uobj)) { ret = -EINVAL; goto err; @@ -3704,21 +3369,21 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, if (ib_srq_has_cq(cmd->srq_type)) { attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, - cmd->cq_handle, file); + cmd->cq_handle, attrs); if (!attr.ext.cq) { ret = -EINVAL; goto err_put_xrcd; } } - pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, file); + pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs); if (!pd) { ret = -EINVAL; goto err_put_cq; } attr.event_handler = ib_uverbs_srq_event_handler; - attr.srq_context = file; + attr.srq_context = attrs->ufile; attr.srq_type = cmd->srq_type; attr.attr.max_wr = cmd->max_wr; attr.attr.max_sge = cmd->max_sge; @@ -3727,7 +3392,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, obj->uevent.events_reported = 0; INIT_LIST_HEAD(&obj->uevent.event_list); - srq = pd->device->create_srq(pd, &attr, udata); + srq = pd->device->ops.create_srq(pd, &attr, udata); if (IS_ERR(srq)) { ret = PTR_ERR(srq); goto err_put; @@ -3763,11 +3428,9 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, if (cmd->srq_type == IB_SRQT_XRC) resp.srqn = srq->ext.xrc.srq_num; - if (copy_to_user(u64_to_user_ptr(cmd->response), - &resp, sizeof resp)) { - ret = -EFAULT; + ret = uverbs_response(attrs, &resp, sizeof(resp)); + if (ret) goto err_copy; - } if (cmd->srq_type == IB_SRQT_XRC) uobj_put_read(xrcd_uobj); @@ -3776,7 +3439,7 @@ static int __uverbs_create_xsrq(struct ib_uverbs_file *file, uobj_put_obj_read(attr.ext.cq); uobj_put_obj_read(pd); - return uobj_alloc_commit(&obj->uevent.uobject, 0); + return uobj_alloc_commit(&obj->uevent.uobject); err_copy: ib_destroy_srq(srq); @@ -3799,21 +3462,15 @@ err: return ret; } -ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_create_srq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_srq cmd; struct ib_uverbs_create_xsrq xcmd; - struct ib_uverbs_create_srq_resp resp; - struct ib_udata udata; int ret; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; memset(&xcmd, 0, sizeof(xcmd)); xcmd.response = cmd.response; @@ -3824,77 
+3481,48 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, xcmd.max_sge = cmd.max_sge; xcmd.srq_limit = cmd.srq_limit; - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); - - ret = __uverbs_create_xsrq(file, &xcmd, &udata); - if (ret) - return ret; - - return in_len; + return __uverbs_create_xsrq(attrs, &xcmd, &attrs->driver_udata); } -ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, int out_len) +static int ib_uverbs_create_xsrq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_create_xsrq cmd; - struct ib_uverbs_create_srq_resp resp; - struct ib_udata udata; int ret; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof(cmd), - u64_to_user_ptr(cmd.response) + sizeof(resp), - in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr), - out_len - sizeof(resp)); - - ret = __uverbs_create_xsrq(file, &cmd, &udata); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; - return in_len; + return __uverbs_create_xsrq(attrs, &cmd, &attrs->driver_udata); } -ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_modify_srq cmd; - struct ib_udata udata; struct ib_srq *srq; struct ib_srq_attr attr; int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; - - ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd, - out_len); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file); + srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); if (!srq) return -EINVAL; attr.max_wr = cmd.max_wr; attr.srq_limit = cmd.srq_limit; - ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata); + ret = srq->device->ops.modify_srq(srq, &attr, cmd.attr_mask, + &attrs->driver_udata); uobj_put_obj_read(srq); - return ret ? 
ret : in_len; + return ret; } -ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, - const char __user *buf, - int in_len, int out_len) +static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_query_srq cmd; struct ib_uverbs_query_srq_resp resp; @@ -3902,13 +3530,11 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, struct ib_srq *srq; int ret; - if (out_len < sizeof resp) - return -ENOSPC; - - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, file); + srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); if (!srq) return -EINVAL; @@ -3925,25 +3551,22 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file, resp.max_sge = attr.max_sge; resp.srq_limit = attr.srq_limit; - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) - return -EFAULT; - - return in_len; + return uverbs_response(attrs, &resp, sizeof(resp)); } -ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) +static int ib_uverbs_destroy_srq(struct uverbs_attr_bundle *attrs) { struct ib_uverbs_destroy_srq cmd; struct ib_uverbs_destroy_srq_resp resp; struct ib_uobject *uobj; struct ib_uevent_object *obj; + int ret; - if (copy_from_user(&cmd, buf, sizeof cmd)) - return -EFAULT; + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); + if (ret) + return ret; - uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, file); + uobj = uobj_get_destroy(UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); if (IS_ERR(uobj)) return PTR_ERR(uobj); @@ -3953,35 +3576,24 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, uobj_put_destroy(uobj); - if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) - return -EFAULT; - - return in_len; + return uverbs_response(attrs, &resp, sizeof(resp)); } -int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_query_device(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_query_device_resp resp = { {0} }; + struct ib_uverbs_ex_query_device_resp resp = {}; struct ib_uverbs_ex_query_device cmd; struct ib_device_attr attr = {0}; struct ib_ucontext *ucontext; struct ib_device *ib_dev; int err; - ucontext = ib_uverbs_get_ucontext(file); + ucontext = ib_uverbs_get_ucontext(attrs); if (IS_ERR(ucontext)) return PTR_ERR(ucontext); ib_dev = ucontext->device; - if (!ib_dev->query_device) - return -EOPNOTSUPP; - - if (ucore->inlen < sizeof(cmd)) - return -EINVAL; - - err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + err = uverbs_request(attrs, &cmd, sizeof(cmd)); if (err) return err; @@ -3991,20 +3603,12 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, if (cmd.reserved) return -EINVAL; - resp.response_length = offsetof(typeof(resp), odp_caps); - - if (ucore->outlen < resp.response_length) - return -ENOSPC; - - err = ib_dev->query_device(ib_dev, &attr, uhw); + err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata); if (err) return err; copy_query_dev_fields(ucontext, &resp.base, &attr); - if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps)) - goto end; - #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING resp.odp_caps.general_caps = attr.odp_caps.general_caps; resp.odp_caps.per_transport_caps.rc_odp_caps = @@ -4014,99 +3618,39 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, 
resp.odp_caps.per_transport_caps.ud_odp_caps = attr.odp_caps.per_transport_caps.ud_odp_caps; #endif - resp.response_length += sizeof(resp.odp_caps); - - if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask)) - goto end; resp.timestamp_mask = attr.timestamp_mask; - resp.response_length += sizeof(resp.timestamp_mask); - - if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock)) - goto end; - resp.hca_core_clock = attr.hca_core_clock; - resp.response_length += sizeof(resp.hca_core_clock); - - if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex)) - goto end; - resp.device_cap_flags_ex = attr.device_cap_flags; - resp.response_length += sizeof(resp.device_cap_flags_ex); - - if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps)) - goto end; - resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts; resp.rss_caps.max_rwq_indirection_tables = attr.rss_caps.max_rwq_indirection_tables; resp.rss_caps.max_rwq_indirection_table_size = attr.rss_caps.max_rwq_indirection_table_size; - - resp.response_length += sizeof(resp.rss_caps); - - if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq)) - goto end; - resp.max_wq_type_rq = attr.max_wq_type_rq; - resp.response_length += sizeof(resp.max_wq_type_rq); - - if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps)) - goto end; - resp.raw_packet_caps = attr.raw_packet_caps; - resp.response_length += sizeof(resp.raw_packet_caps); - - if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps)) - goto end; - resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size; resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags; resp.tm_caps.max_ops = attr.tm_caps.max_ops; resp.tm_caps.max_sge = attr.tm_caps.max_sge; resp.tm_caps.flags = attr.tm_caps.flags; - resp.response_length += sizeof(resp.tm_caps); - - if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps)) - goto end; - resp.cq_moderation_caps.max_cq_moderation_count = attr.cq_caps.max_cq_moderation_count; resp.cq_moderation_caps.max_cq_moderation_period = attr.cq_caps.max_cq_moderation_period; - resp.response_length += sizeof(resp.cq_moderation_caps); - - if (ucore->outlen < resp.response_length + sizeof(resp.max_dm_size)) - goto end; - resp.max_dm_size = attr.max_dm_size; - resp.response_length += sizeof(resp.max_dm_size); -end: - err = ib_copy_to_udata(ucore, &resp, resp.response_length); - return err; + resp.response_length = uverbs_response_length(attrs, sizeof(resp)); + + return uverbs_response(attrs, &resp, sizeof(resp)); } -int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) +static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs) { - struct ib_uverbs_ex_modify_cq cmd = {}; + struct ib_uverbs_ex_modify_cq cmd; struct ib_cq *cq; - size_t required_cmd_sz; int ret; - required_cmd_sz = offsetof(typeof(cmd), reserved) + - sizeof(cmd.reserved); - if (ucore->inlen < required_cmd_sz) - return -EINVAL; - - /* sanity checks */ - if (ucore->inlen > sizeof(cmd) && - !ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) - return -EOPNOTSUPP; - - ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen)); + ret = uverbs_request(attrs, &cmd, sizeof(cmd)); if (ret) return ret; @@ -4116,7 +3660,7 @@ int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file, if (cmd.attr_mask > IB_CQ_MODERATE) return -EOPNOTSUPP; - cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, file); + cq = 
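ex_query_device() above illustrates how extended commands now report back: instead of growing resp.response_length field by field and bailing out whenever ucore->outlen ran short, the handler fills the whole response, records uverbs_response_length(attrs, sizeof(resp)) and lets uverbs_response() cope with a userspace buffer shorter than the kernel's struct. A hedged sketch of that tail end of an extended handler (example_ex_resp is a placeholder for a real extended response struct carrying a response_length field):

/*
 * Sketch only: the response tail used by the ex_* handlers in this diff.
 */
static int example_ex_tail(struct uverbs_attr_bundle *attrs,
			   struct example_ex_resp *resp)
{
	/* Fill every response field unconditionally ... */

	/* Record how much of the struct the caller's buffer can take. */
	resp->response_length = uverbs_response_length(attrs, sizeof(*resp));

	/* The helper copies no more than userspace asked for. */
	return uverbs_response(attrs, resp, sizeof(*resp));
}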
uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); if (!cq) return -EINVAL; @@ -4126,3 +3670,381 @@ int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file, return ret; } + +/* + * Describe the input structs for write(). Some write methods have an input + * only struct, most have an input and output. If the struct has an output then + * the 'response' u64 must be the first field in the request structure. + * + * If udata is present then both the request and response structs have a + * trailing driver_data flex array. In this case the size of the base struct + * cannot be changed. + */ +#define offsetof_after(_struct, _member) \ + (offsetof(_struct, _member) + sizeof(((_struct *)NULL)->_member)) + +#define UAPI_DEF_WRITE_IO(req, resp) \ + .write.has_resp = 1 + \ + BUILD_BUG_ON_ZERO(offsetof(req, response) != 0) + \ + BUILD_BUG_ON_ZERO(sizeof(((req *)0)->response) != \ + sizeof(u64)), \ + .write.req_size = sizeof(req), .write.resp_size = sizeof(resp) + +#define UAPI_DEF_WRITE_I(req) .write.req_size = sizeof(req) + +#define UAPI_DEF_WRITE_UDATA_IO(req, resp) \ + UAPI_DEF_WRITE_IO(req, resp), \ + .write.has_udata = \ + 1 + \ + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \ + sizeof(req)) + \ + BUILD_BUG_ON_ZERO(offsetof(resp, driver_data) != \ + sizeof(resp)) + +#define UAPI_DEF_WRITE_UDATA_I(req) \ + UAPI_DEF_WRITE_I(req), \ + .write.has_udata = \ + 1 + BUILD_BUG_ON_ZERO(offsetof(req, driver_data) != \ + sizeof(req)) + +/* + * The _EX versions are for use with WRITE_EX and allow the last struct member + * to be specified. Buffers that do not include that member will be rejected. + */ +#define UAPI_DEF_WRITE_IO_EX(req, req_last_member, resp, resp_last_member) \ + .write.has_resp = 1, \ + .write.req_size = offsetof_after(req, req_last_member), \ + .write.resp_size = offsetof_after(resp, resp_last_member) + +#define UAPI_DEF_WRITE_I_EX(req, req_last_member) \ + .write.req_size = offsetof_after(req, req_last_member) + +const struct uapi_definition uverbs_def_write_intf[] = { + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_AH, + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_AH, + ib_uverbs_create_ah, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_create_ah, + struct ib_uverbs_create_ah_resp), + UAPI_DEF_METHOD_NEEDS_FN(create_ah)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_DESTROY_AH, + ib_uverbs_destroy_ah, + UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_ah), + UAPI_DEF_METHOD_NEEDS_FN(destroy_ah))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_COMP_CHANNEL, + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL, + ib_uverbs_create_comp_channel, + UAPI_DEF_WRITE_IO( + struct ib_uverbs_create_comp_channel, + struct ib_uverbs_create_comp_channel_resp))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_CQ, + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_CQ, + ib_uverbs_create_cq, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_create_cq, + struct ib_uverbs_create_cq_resp), + UAPI_DEF_METHOD_NEEDS_FN(create_cq)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_DESTROY_CQ, + ib_uverbs_destroy_cq, + UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_cq, + struct ib_uverbs_destroy_cq_resp), + UAPI_DEF_METHOD_NEEDS_FN(destroy_cq)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_POLL_CQ, + ib_uverbs_poll_cq, + UAPI_DEF_WRITE_IO(struct ib_uverbs_poll_cq, + struct ib_uverbs_poll_cq_resp), + UAPI_DEF_METHOD_NEEDS_FN(poll_cq)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_REQ_NOTIFY_CQ, + ib_uverbs_req_notify_cq, + UAPI_DEF_WRITE_I(struct ib_uverbs_req_notify_cq), + UAPI_DEF_METHOD_NEEDS_FN(req_notify_cq)), + 
DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_RESIZE_CQ, + ib_uverbs_resize_cq, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_resize_cq, + struct ib_uverbs_resize_cq_resp), + UAPI_DEF_METHOD_NEEDS_FN(resize_cq)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_CREATE_CQ, + ib_uverbs_ex_create_cq, + UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_cq, + reserved, + struct ib_uverbs_ex_create_cq_resp, + response_length), + UAPI_DEF_METHOD_NEEDS_FN(create_cq)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_MODIFY_CQ, + ib_uverbs_ex_modify_cq, + UAPI_DEF_WRITE_I(struct ib_uverbs_ex_modify_cq), + UAPI_DEF_METHOD_NEEDS_FN(create_cq))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_DEVICE, + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_GET_CONTEXT, + ib_uverbs_get_context, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_get_context, + struct ib_uverbs_get_context_resp)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_QUERY_DEVICE, + ib_uverbs_query_device, + UAPI_DEF_WRITE_IO(struct ib_uverbs_query_device, + struct ib_uverbs_query_device_resp)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_QUERY_PORT, + ib_uverbs_query_port, + UAPI_DEF_WRITE_IO(struct ib_uverbs_query_port, + struct ib_uverbs_query_port_resp), + UAPI_DEF_METHOD_NEEDS_FN(query_port)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_QUERY_DEVICE, + ib_uverbs_ex_query_device, + UAPI_DEF_WRITE_IO_EX( + struct ib_uverbs_ex_query_device, + reserved, + struct ib_uverbs_ex_query_device_resp, + response_length), + UAPI_DEF_METHOD_NEEDS_FN(query_device)), + UAPI_DEF_OBJ_NEEDS_FN(alloc_ucontext), + UAPI_DEF_OBJ_NEEDS_FN(dealloc_ucontext)), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_FLOW, + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_CREATE_FLOW, + ib_uverbs_ex_create_flow, + UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_create_flow, + flow_attr, + struct ib_uverbs_create_flow_resp, + flow_handle), + UAPI_DEF_METHOD_NEEDS_FN(create_flow)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_DESTROY_FLOW, + ib_uverbs_ex_destroy_flow, + UAPI_DEF_WRITE_I(struct ib_uverbs_destroy_flow), + UAPI_DEF_METHOD_NEEDS_FN(destroy_flow))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_MR, + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_DEREG_MR, + ib_uverbs_dereg_mr, + UAPI_DEF_WRITE_I(struct ib_uverbs_dereg_mr), + UAPI_DEF_METHOD_NEEDS_FN(dereg_mr)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_REG_MR, + ib_uverbs_reg_mr, + UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_reg_mr, + struct ib_uverbs_reg_mr_resp), + UAPI_DEF_METHOD_NEEDS_FN(reg_user_mr)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_REREG_MR, + ib_uverbs_rereg_mr, + UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_rereg_mr, + struct ib_uverbs_rereg_mr_resp), + UAPI_DEF_METHOD_NEEDS_FN(rereg_user_mr))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_MW, + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_ALLOC_MW, + ib_uverbs_alloc_mw, + UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_mw, + struct ib_uverbs_alloc_mw_resp), + UAPI_DEF_METHOD_NEEDS_FN(alloc_mw)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_DEALLOC_MW, + ib_uverbs_dealloc_mw, + UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_mw), + UAPI_DEF_METHOD_NEEDS_FN(dealloc_mw))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_PD, + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_ALLOC_PD, + ib_uverbs_alloc_pd, + UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_alloc_pd, + struct ib_uverbs_alloc_pd_resp), + UAPI_DEF_METHOD_NEEDS_FN(alloc_pd)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_DEALLOC_PD, + ib_uverbs_dealloc_pd, + UAPI_DEF_WRITE_I(struct ib_uverbs_dealloc_pd), + UAPI_DEF_METHOD_NEEDS_FN(dealloc_pd))), + + 
DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_QP, + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_ATTACH_MCAST, + ib_uverbs_attach_mcast, + UAPI_DEF_WRITE_I(struct ib_uverbs_attach_mcast), + UAPI_DEF_METHOD_NEEDS_FN(attach_mcast), + UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)), + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_QP, + ib_uverbs_create_qp, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_create_qp, + struct ib_uverbs_create_qp_resp), + UAPI_DEF_METHOD_NEEDS_FN(create_qp)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_DESTROY_QP, + ib_uverbs_destroy_qp, + UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_qp, + struct ib_uverbs_destroy_qp_resp), + UAPI_DEF_METHOD_NEEDS_FN(destroy_qp)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_DETACH_MCAST, + ib_uverbs_detach_mcast, + UAPI_DEF_WRITE_I(struct ib_uverbs_detach_mcast), + UAPI_DEF_METHOD_NEEDS_FN(detach_mcast)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_MODIFY_QP, + ib_uverbs_modify_qp, + UAPI_DEF_WRITE_I(struct ib_uverbs_modify_qp), + UAPI_DEF_METHOD_NEEDS_FN(modify_qp)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_POST_RECV, + ib_uverbs_post_recv, + UAPI_DEF_WRITE_IO(struct ib_uverbs_post_recv, + struct ib_uverbs_post_recv_resp), + UAPI_DEF_METHOD_NEEDS_FN(post_recv)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_POST_SEND, + ib_uverbs_post_send, + UAPI_DEF_WRITE_IO(struct ib_uverbs_post_send, + struct ib_uverbs_post_send_resp), + UAPI_DEF_METHOD_NEEDS_FN(post_send)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_QUERY_QP, + ib_uverbs_query_qp, + UAPI_DEF_WRITE_IO(struct ib_uverbs_query_qp, + struct ib_uverbs_query_qp_resp), + UAPI_DEF_METHOD_NEEDS_FN(query_qp)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_CREATE_QP, + ib_uverbs_ex_create_qp, + UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_qp, + comp_mask, + struct ib_uverbs_ex_create_qp_resp, + response_length), + UAPI_DEF_METHOD_NEEDS_FN(create_qp)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_MODIFY_QP, + ib_uverbs_ex_modify_qp, + UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_modify_qp, + base, + struct ib_uverbs_ex_modify_qp_resp, + response_length), + UAPI_DEF_METHOD_NEEDS_FN(modify_qp))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_RWQ_IND_TBL, + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL, + ib_uverbs_ex_create_rwq_ind_table, + UAPI_DEF_WRITE_IO_EX( + struct ib_uverbs_ex_create_rwq_ind_table, + log_ind_tbl_size, + struct ib_uverbs_ex_create_rwq_ind_table_resp, + ind_tbl_num), + UAPI_DEF_METHOD_NEEDS_FN(create_rwq_ind_table)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL, + ib_uverbs_ex_destroy_rwq_ind_table, + UAPI_DEF_WRITE_I( + struct ib_uverbs_ex_destroy_rwq_ind_table), + UAPI_DEF_METHOD_NEEDS_FN(destroy_rwq_ind_table))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_WQ, + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_CREATE_WQ, + ib_uverbs_ex_create_wq, + UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_create_wq, + max_sge, + struct ib_uverbs_ex_create_wq_resp, + wqn), + UAPI_DEF_METHOD_NEEDS_FN(create_wq)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_DESTROY_WQ, + ib_uverbs_ex_destroy_wq, + UAPI_DEF_WRITE_IO_EX(struct ib_uverbs_ex_destroy_wq, + wq_handle, + struct ib_uverbs_ex_destroy_wq_resp, + reserved), + UAPI_DEF_METHOD_NEEDS_FN(destroy_wq)), + DECLARE_UVERBS_WRITE_EX( + IB_USER_VERBS_EX_CMD_MODIFY_WQ, + ib_uverbs_ex_modify_wq, + UAPI_DEF_WRITE_I_EX(struct ib_uverbs_ex_modify_wq, + curr_wq_state), + UAPI_DEF_METHOD_NEEDS_FN(modify_wq))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_SRQ, + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_SRQ, + 
ib_uverbs_create_srq, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_create_srq, + struct ib_uverbs_create_srq_resp), + UAPI_DEF_METHOD_NEEDS_FN(create_srq)), + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_CREATE_XSRQ, + ib_uverbs_create_xsrq, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_create_xsrq, + struct ib_uverbs_create_srq_resp), + UAPI_DEF_METHOD_NEEDS_FN(create_srq)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_DESTROY_SRQ, + ib_uverbs_destroy_srq, + UAPI_DEF_WRITE_IO(struct ib_uverbs_destroy_srq, + struct ib_uverbs_destroy_srq_resp), + UAPI_DEF_METHOD_NEEDS_FN(destroy_srq)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_MODIFY_SRQ, + ib_uverbs_modify_srq, + UAPI_DEF_WRITE_UDATA_I(struct ib_uverbs_modify_srq), + UAPI_DEF_METHOD_NEEDS_FN(modify_srq)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_POST_SRQ_RECV, + ib_uverbs_post_srq_recv, + UAPI_DEF_WRITE_IO(struct ib_uverbs_post_srq_recv, + struct ib_uverbs_post_srq_recv_resp), + UAPI_DEF_METHOD_NEEDS_FN(post_srq_recv)), + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_QUERY_SRQ, + ib_uverbs_query_srq, + UAPI_DEF_WRITE_IO(struct ib_uverbs_query_srq, + struct ib_uverbs_query_srq_resp), + UAPI_DEF_METHOD_NEEDS_FN(query_srq))), + + DECLARE_UVERBS_OBJECT( + UVERBS_OBJECT_XRCD, + DECLARE_UVERBS_WRITE( + IB_USER_VERBS_CMD_CLOSE_XRCD, + ib_uverbs_close_xrcd, + UAPI_DEF_WRITE_I(struct ib_uverbs_close_xrcd), + UAPI_DEF_METHOD_NEEDS_FN(dealloc_xrcd)), + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_QP, + ib_uverbs_open_qp, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_open_qp, + struct ib_uverbs_create_qp_resp)), + DECLARE_UVERBS_WRITE(IB_USER_VERBS_CMD_OPEN_XRCD, + ib_uverbs_open_xrcd, + UAPI_DEF_WRITE_UDATA_IO( + struct ib_uverbs_open_xrcd, + struct ib_uverbs_open_xrcd_resp), + UAPI_DEF_METHOD_NEEDS_FN(alloc_xrcd))), + + {}, +}; diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c index b0e493e8d860..8c81ff698052 100644 --- a/drivers/infiniband/core/uverbs_ioctl.c +++ b/drivers/infiniband/core/uverbs_ioctl.c @@ -404,8 +404,7 @@ static int uverbs_set_attr(struct bundle_priv *pbundle, static int ib_uverbs_run_method(struct bundle_priv *pbundle, unsigned int num_attrs) { - int (*handler)(struct ib_uverbs_file *ufile, - struct uverbs_attr_bundle *ctx); + int (*handler)(struct uverbs_attr_bundle *attrs); size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs); unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey; unsigned int i; @@ -436,6 +435,11 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, pbundle->method_elm->key_bitmap_len))) return -EINVAL; + if (pbundle->method_elm->has_udata) + uverbs_fill_udata(&pbundle->bundle, + &pbundle->bundle.driver_udata, + UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT); + if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) { struct uverbs_obj_attr *destroy_attr = &pbundle->bundle.attrs[destroy_bkey].obj_attr; @@ -445,10 +449,10 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle, return ret; __clear_bit(destroy_bkey, pbundle->uobj_finalize); - ret = handler(pbundle->bundle.ufile, &pbundle->bundle); + ret = handler(&pbundle->bundle); uobj_put_destroy(destroy_attr->uobject); } else { - ret = handler(pbundle->bundle.ufile, &pbundle->bundle); + ret = handler(&pbundle->bundle); } /* @@ -662,35 +666,37 @@ int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle, EXPORT_SYMBOL(uverbs_get_flags32); /* - * This is for ease of conversion. The purpose is to convert all drivers to - * use uverbs_attr_bundle instead of ib_udata. 
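The uverbs_def_write_intf[] table above replaces the two hand-maintained dispatch arrays that used to live in uverbs_main.c (removed further down): each write/write-ex method is declared once, together with its handler, its request/response sizes and the driver callbacks it depends on, so the core can perform the size and capability checks generically. As a hedged sketch, adding one more method would mean another entry of this shape inside a uapi_definition array (every EXAMPLE name below is hypothetical and does not exist in the kernel):

/*
 * Hypothetical table entry following the DECLARE_UVERBS_* and
 * UAPI_DEF_WRITE_* macros defined above.
 */
DECLARE_UVERBS_OBJECT(
	UVERBS_OBJECT_EXAMPLE,
	DECLARE_UVERBS_WRITE(
		IB_USER_VERBS_CMD_EXAMPLE,
		ib_uverbs_example,
		/* request starts with a response pointer, both ends carry driver_data */
		UAPI_DEF_WRITE_UDATA_IO(struct ib_uverbs_example,
					struct ib_uverbs_example_resp),
		/* method is exposed only if the driver implements this verb */
		UAPI_DEF_METHOD_NEEDS_FN(example_verb))),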
Assume attr == 0 is input and - * attr == 1 is output. + * Fill a ib_udata struct (core or uhw) using the given attribute IDs. + * This is primarily used to convert the UVERBS_ATTR_UHW() into the + * ib_udata format used by the drivers. */ -void create_udata(struct uverbs_attr_bundle *bundle, struct ib_udata *udata) +void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, + struct ib_udata *udata, unsigned int attr_in, + unsigned int attr_out) { struct bundle_priv *pbundle = container_of(bundle, struct bundle_priv, bundle); - const struct uverbs_attr *uhw_in = - uverbs_attr_get(bundle, UVERBS_ATTR_UHW_IN); - const struct uverbs_attr *uhw_out = - uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT); - - if (!IS_ERR(uhw_in)) { - udata->inlen = uhw_in->ptr_attr.len; - if (uverbs_attr_ptr_is_inline(uhw_in)) + const struct uverbs_attr *in = + uverbs_attr_get(&pbundle->bundle, attr_in); + const struct uverbs_attr *out = + uverbs_attr_get(&pbundle->bundle, attr_out); + + if (!IS_ERR(in)) { + udata->inlen = in->ptr_attr.len; + if (uverbs_attr_ptr_is_inline(in)) udata->inbuf = - &pbundle->user_attrs[uhw_in->ptr_attr.uattr_idx] + &pbundle->user_attrs[in->ptr_attr.uattr_idx] .data; else - udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data); + udata->inbuf = u64_to_user_ptr(in->ptr_attr.data); } else { udata->inbuf = NULL; udata->inlen = 0; } - if (!IS_ERR(uhw_out)) { - udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data); - udata->outlen = uhw_out->ptr_attr.len; + if (!IS_ERR(out)) { + udata->outbuf = u64_to_user_ptr(out->ptr_attr.data); + udata->outlen = out->ptr_attr.len; } else { udata->outbuf = NULL; udata->outlen = 0; @@ -745,3 +751,14 @@ int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle, return 0; } EXPORT_SYMBOL(_uverbs_get_const); + +int uverbs_copy_to_struct_or_zero(const struct uverbs_attr_bundle *bundle, + size_t idx, const void *from, size_t size) +{ + const struct uverbs_attr *attr = uverbs_attr_get(bundle, idx); + + if (clear_user(u64_to_user_ptr(attr->ptr_attr.data), + attr->ptr_attr.len)) + return -EFAULT; + return uverbs_copy_to(bundle, idx, from, size); +} diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 6d373f5515b7..9f9172eb1512 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -74,64 +74,6 @@ static dev_t dynamic_uverbs_dev; static struct class *uverbs_class; static DEFINE_IDA(uverbs_ida); - -static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, - const char __user *buf, int in_len, - int out_len) = { - [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, - [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, - [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, - [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, - [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, - [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, - [IB_USER_VERBS_CMD_REREG_MR] = ib_uverbs_rereg_mr, - [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, - [IB_USER_VERBS_CMD_ALLOC_MW] = ib_uverbs_alloc_mw, - [IB_USER_VERBS_CMD_DEALLOC_MW] = ib_uverbs_dealloc_mw, - [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel, - [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, - [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, - [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, - [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, - [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, - [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, - 
[IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, - [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp, - [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, - [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, - [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, - [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, - [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, - [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, - [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, - [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, - [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, - [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, - [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, - [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, - [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd, - [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, - [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, - [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp, -}; - -static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, - struct ib_udata *ucore, - struct ib_udata *uhw) = { - [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, - [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, - [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device, - [IB_USER_VERBS_EX_CMD_CREATE_CQ] = ib_uverbs_ex_create_cq, - [IB_USER_VERBS_EX_CMD_CREATE_QP] = ib_uverbs_ex_create_qp, - [IB_USER_VERBS_EX_CMD_CREATE_WQ] = ib_uverbs_ex_create_wq, - [IB_USER_VERBS_EX_CMD_MODIFY_WQ] = ib_uverbs_ex_modify_wq, - [IB_USER_VERBS_EX_CMD_DESTROY_WQ] = ib_uverbs_ex_destroy_wq, - [IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table, - [IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table, - [IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp, - [IB_USER_VERBS_EX_CMD_MODIFY_CQ] = ib_uverbs_ex_modify_cq, -}; - static void ib_uverbs_add_one(struct ib_device *device); static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); @@ -139,7 +81,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data); * Must be called with the ufile->device->disassociate_srcu held, and the lock * must be held until use of the ucontext is finished. 
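/*
 * Editor's sketch -- not part of the patch above.  It illustrates the
 * calling convention the preceding comment describes for the renamed
 * ib_uverbs_get_ucontext_file(): the caller takes the device's
 * disassociate SRCU read lock and keeps it for as long as the ucontext
 * is used, exactly as ib_uverbs_mmap() further down in this file does.
 * with_ucontext_example() is a hypothetical caller.
 */
static int with_ucontext_example(struct ib_uverbs_file *file)
{
	struct ib_ucontext *ucontext;
	int srcu_key;
	int ret = 0;

	srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
	ucontext = ib_uverbs_get_ucontext_file(file);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
	} else {
		/* use ucontext only while the SRCU read lock is held */
	}
	srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
	return ret;
}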
*/ -struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile) +struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile) { /* * We do not hold the hw_destroy_rwsem lock for this flow, instead @@ -157,14 +99,14 @@ struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile) return ucontext; } -EXPORT_SYMBOL(ib_uverbs_get_ucontext); +EXPORT_SYMBOL(ib_uverbs_get_ucontext_file); int uverbs_dealloc_mw(struct ib_mw *mw) { struct ib_pd *pd = mw->pd; int ret; - ret = mw->device->dealloc_mw(mw); + ret = mw->device->ops.dealloc_mw(mw); if (!ret) atomic_dec(&pd->usecnt); return ret; @@ -255,7 +197,7 @@ void ib_uverbs_release_file(struct kref *ref) srcu_key = srcu_read_lock(&file->device->disassociate_srcu); ib_dev = srcu_dereference(file->device->ib_dev, &file->device->disassociate_srcu); - if (ib_dev && !ib_dev->disassociate_ucontext) + if (ib_dev && !ib_dev->ops.disassociate_ucontext) module_put(ib_dev->owner); srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); @@ -646,51 +588,19 @@ err_put_refs: return filp; } -static bool verify_command_mask(struct ib_uverbs_file *ufile, u32 command, - bool extended) -{ - if (!extended) - return ufile->uverbs_cmd_mask & BIT_ULL(command); - - return ufile->uverbs_ex_cmd_mask & BIT_ULL(command); -} - -static bool verify_command_idx(u32 command, bool extended) -{ - if (extended) - return command < ARRAY_SIZE(uverbs_ex_cmd_table) && - uverbs_ex_cmd_table[command]; - - return command < ARRAY_SIZE(uverbs_cmd_table) && - uverbs_cmd_table[command]; -} - -static ssize_t process_hdr(struct ib_uverbs_cmd_hdr *hdr, - u32 *command, bool *extended) -{ - if (hdr->command & ~(u32)(IB_USER_VERBS_CMD_FLAG_EXTENDED | - IB_USER_VERBS_CMD_COMMAND_MASK)) - return -EINVAL; - - *command = hdr->command & IB_USER_VERBS_CMD_COMMAND_MASK; - *extended = hdr->command & IB_USER_VERBS_CMD_FLAG_EXTENDED; - - if (!verify_command_idx(*command, *extended)) - return -EOPNOTSUPP; - - return 0; -} - static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, - struct ib_uverbs_ex_cmd_hdr *ex_hdr, - size_t count, bool extended) + struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count, + const struct uverbs_api_write_method *method_elm) { - if (extended) { + if (method_elm->is_ex) { count -= sizeof(*hdr) + sizeof(*ex_hdr); if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count) return -EINVAL; + if (hdr->in_words * 8 < method_elm->req_size) + return -ENOSPC; + if (ex_hdr->cmd_hdr_reserved) return -EINVAL; @@ -698,6 +608,9 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, if (!hdr->out_words && !ex_hdr->provider_out_words) return -EINVAL; + if (hdr->out_words * 8 < method_elm->resp_size) + return -ENOSPC; + if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(ex_hdr->response), (hdr->out_words + ex_hdr->provider_out_words) * 8)) @@ -714,6 +627,24 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr, if (hdr->in_words * 4 != count) return -EINVAL; + if (count < method_elm->req_size + sizeof(hdr)) { + /* + * rdma-core v18 and v19 have a bug where they send DESTROY_CQ + * with a 16 byte write instead of 24. Old kernels didn't + * check the size so they allowed this. Now that the size is + * checked provide a compatibility work around to not break + * those userspaces. 
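/*
 * Editor's note -- not part of the patch: where the 16 and 24 bytes in
 * the workaround below come from.  For non-extended commands in_words
 * is in 4-byte units and covers the 8-byte header, so a correct
 * DESTROY_CQ write is sizeof(struct ib_uverbs_cmd_hdr) +
 * sizeof(struct ib_uverbs_destroy_cq) = 8 + 16 = 24 bytes, i.e.
 * in_words == 6.  The affected rdma-core versions sent only 16 bytes,
 * which the special case accepts and fixes up instead of returning
 * -ENOSPC.  The asserts below merely restate those sizes, assuming
 * installed uapi headers that match this kernel.
 */
#include <rdma/ib_user_verbs.h>

_Static_assert(sizeof(struct ib_uverbs_cmd_hdr) == 8, "8-byte write header");
_Static_assert(sizeof(struct ib_uverbs_destroy_cq) == 16, "16-byte request");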
+ */ + if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ && + count == 16) { + hdr->in_words = 6; + return 0; + } + return -ENOSPC; + } + if (hdr->out_words * 4 < method_elm->resp_size) + return -ENOSPC; + return 0; } @@ -721,11 +652,12 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ib_uverbs_file *file = filp->private_data; + const struct uverbs_api_write_method *method_elm; + struct uverbs_api *uapi = file->device->uapi; struct ib_uverbs_ex_cmd_hdr ex_hdr; struct ib_uverbs_cmd_hdr hdr; - bool extended; + struct uverbs_attr_bundle bundle; int srcu_key; - u32 command; ssize_t ret; if (!ib_safe_file_access(filp)) { @@ -740,57 +672,92 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; - ret = process_hdr(&hdr, &command, &extended); - if (ret) - return ret; + method_elm = uapi_get_method(uapi, hdr.command); + if (IS_ERR(method_elm)) + return PTR_ERR(method_elm); - if (extended) { + if (method_elm->is_ex) { if (count < (sizeof(hdr) + sizeof(ex_hdr))) return -EINVAL; if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr))) return -EFAULT; } - ret = verify_hdr(&hdr, &ex_hdr, count, extended); + ret = verify_hdr(&hdr, &ex_hdr, count, method_elm); if (ret) return ret; srcu_key = srcu_read_lock(&file->device->disassociate_srcu); - if (!verify_command_mask(file, command, extended)) { - ret = -EOPNOTSUPP; - goto out; - } - buf += sizeof(hdr); - if (!extended) { - ret = uverbs_cmd_table[command](file, buf, - hdr.in_words * 4, - hdr.out_words * 4); - } else { - struct ib_udata ucore; - struct ib_udata uhw; + bundle.ufile = file; + if (!method_elm->is_ex) { + size_t in_len = hdr.in_words * 4 - sizeof(hdr); + size_t out_len = hdr.out_words * 4; + u64 response = 0; + + if (method_elm->has_udata) { + bundle.driver_udata.inlen = + in_len - method_elm->req_size; + in_len = method_elm->req_size; + if (bundle.driver_udata.inlen) + bundle.driver_udata.inbuf = buf + in_len; + else + bundle.driver_udata.inbuf = NULL; + } else { + memset(&bundle.driver_udata, 0, + sizeof(bundle.driver_udata)); + } + + if (method_elm->has_resp) { + /* + * The macros check that if has_resp is set + * then the command request structure starts + * with a '__aligned u64 response' member. 
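/*
 * Editor's sketch -- not part of the patch: the layout convention the
 * comment above relies on.  A write command that returns data places an
 * aligned 64-bit user pointer to its response buffer at the very start
 * of the request struct, so this common code can fetch it with a single
 * get_user() without knowing the rest of the layout.  The struct below
 * is hypothetical; the real examples are the request structs in
 * include/uapi/rdma/ib_user_verbs.h.
 */
#include <linux/types.h>

struct example_write_req {
	__aligned_u64 response;	/* user VA of the response struct */
	__u32 handle;		/* command-specific fields follow */
	__u32 reserved;
};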
+ */ + ret = get_user(response, (const u64 *)buf); + if (ret) + goto out_unlock; + + if (method_elm->has_udata) { + bundle.driver_udata.outlen = + out_len - method_elm->resp_size; + out_len = method_elm->resp_size; + if (bundle.driver_udata.outlen) + bundle.driver_udata.outbuf = + u64_to_user_ptr(response + + out_len); + else + bundle.driver_udata.outbuf = NULL; + } + } else { + bundle.driver_udata.outlen = 0; + bundle.driver_udata.outbuf = NULL; + } + ib_uverbs_init_udata_buf_or_null( + &bundle.ucore, buf, u64_to_user_ptr(response), + in_len, out_len); + } else { buf += sizeof(ex_hdr); - ib_uverbs_init_udata_buf_or_null(&ucore, buf, + ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf, u64_to_user_ptr(ex_hdr.response), hdr.in_words * 8, hdr.out_words * 8); - ib_uverbs_init_udata_buf_or_null(&uhw, - buf + ucore.inlen, - u64_to_user_ptr(ex_hdr.response) + ucore.outlen, - ex_hdr.provider_in_words * 8, - ex_hdr.provider_out_words * 8); + ib_uverbs_init_udata_buf_or_null( + &bundle.driver_udata, buf + bundle.ucore.inlen, + u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen, + ex_hdr.provider_in_words * 8, + ex_hdr.provider_out_words * 8); - ret = uverbs_ex_cmd_table[command](file, &ucore, &uhw); - ret = (ret) ? : count; } -out: + ret = method_elm->handler(&bundle); +out_unlock: srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); - return ret; + return (ret) ? : count; } static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) @@ -801,13 +768,13 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) int srcu_key; srcu_key = srcu_read_lock(&file->device->disassociate_srcu); - ucontext = ib_uverbs_get_ucontext(file); + ucontext = ib_uverbs_get_ucontext_file(file); if (IS_ERR(ucontext)) { ret = PTR_ERR(ucontext); goto out; } - ret = ucontext->device->mmap(ucontext, vma); + ret = ucontext->device->ops.mmap(ucontext, vma); out: srcu_read_unlock(&file->device->disassociate_srcu, srcu_key); return ret; @@ -1069,7 +1036,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) /* In case IB device supports disassociate ucontext, there is no hard * dependency between uverbs device and its low level device. */ - module_dependent = !(ib_dev->disassociate_ucontext); + module_dependent = !(ib_dev->ops.disassociate_ucontext); if (module_dependent) { if (!try_module_get(ib_dev->owner)) { @@ -1102,9 +1069,6 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) mutex_unlock(&dev->lists_mutex); srcu_read_unlock(&dev->disassociate_srcu, srcu_key); - file->uverbs_cmd_mask = ib_dev->uverbs_cmd_mask; - file->uverbs_ex_cmd_mask = ib_dev->uverbs_ex_cmd_mask; - setup_ufile_idr_uobject(file); return nonseekable_open(inode, filp); @@ -1224,7 +1188,7 @@ static int ib_uverbs_create_uapi(struct ib_device *device, { struct uverbs_api *uapi; - uapi = uverbs_alloc_api(device->driver_specs, device->driver_id); + uapi = uverbs_alloc_api(device); if (IS_ERR(uapi)) return PTR_ERR(uapi); @@ -1239,7 +1203,7 @@ static void ib_uverbs_add_one(struct ib_device *device) struct ib_uverbs_device *uverbs_dev; int ret; - if (!device->alloc_ucontext) + if (!device->ops.alloc_ucontext) return; uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL); @@ -1285,7 +1249,7 @@ static void ib_uverbs_add_one(struct ib_device *device) dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum); cdev_init(&uverbs_dev->cdev, - device->mmap ? &uverbs_mmap_fops : &uverbs_fops); + device->ops.mmap ? 
&uverbs_mmap_fops : &uverbs_fops); uverbs_dev->cdev.owner = THIS_MODULE; ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev); @@ -1373,7 +1337,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data) cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev); ida_free(&uverbs_ida, uverbs_dev->devnum); - if (device->disassociate_ucontext) { + if (device->ops.disassociate_ucontext) { /* We disassociate HW resources and immediately return. * Userspace will see a EIO errno for all future access. * Upon returning, ib_device may be freed internally and is not diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c index 203cc96ac6f5..cbc72312eb41 100644 --- a/drivers/infiniband/core/uverbs_std_types.c +++ b/drivers/infiniband/core/uverbs_std_types.c @@ -42,7 +42,8 @@ static int uverbs_free_ah(struct ib_uobject *uobject, enum rdma_remove_reason why) { - return rdma_destroy_ah((struct ib_ah *)uobject->object); + return rdma_destroy_ah((struct ib_ah *)uobject->object, + RDMA_DESTROY_AH_SLEEPABLE); } static int uverbs_free_flow(struct ib_uobject *uobject, @@ -54,7 +55,7 @@ static int uverbs_free_flow(struct ib_uobject *uobject, struct ib_qp *qp = flow->qp; int ret; - ret = flow->device->destroy_flow(flow); + ret = flow->device->ops.destroy_flow(flow); if (!ret) { if (qp) atomic_dec(&qp->usecnt); @@ -210,8 +211,7 @@ static int uverbs_hot_unplug_completion_event_file(struct ib_uobject *uobj, return 0; }; -int uverbs_destroy_def_handler(struct ib_uverbs_file *file, - struct uverbs_attr_bundle *attrs) +int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs) { return 0; } @@ -229,58 +229,106 @@ DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_QP, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), uverbs_free_qp)); +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + UVERBS_METHOD_MW_DESTROY, + UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MW_HANDLE, + UVERBS_OBJECT_MW, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); + DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_MW, - UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw)); + UVERBS_TYPE_ALLOC_IDR(uverbs_free_mw), + &UVERBS_METHOD(UVERBS_METHOD_MW_DESTROY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_SRQ, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), uverbs_free_srq)); +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + UVERBS_METHOD_AH_DESTROY, + UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_AH_HANDLE, + UVERBS_OBJECT_AH, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); + DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_AH, - UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah)); + UVERBS_TYPE_ALLOC_IDR(uverbs_free_ah), + &UVERBS_METHOD(UVERBS_METHOD_AH_DESTROY)); + +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + UVERBS_METHOD_FLOW_DESTROY, + UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_FLOW_HANDLE, + UVERBS_OBJECT_FLOW, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_FLOW, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uflow_object), - uverbs_free_flow)); + uverbs_free_flow), + &UVERBS_METHOD(UVERBS_METHOD_FLOW_DESTROY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_WQ, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), uverbs_free_wq)); +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + UVERBS_METHOD_RWQ_IND_TBL_DESTROY, + UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_RWQ_IND_TBL_HANDLE, + UVERBS_OBJECT_RWQ_IND_TBL, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); + DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL, - UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl)); + UVERBS_TYPE_ALLOC_IDR(uverbs_free_rwq_ind_tbl), + 
&UVERBS_METHOD(UVERBS_METHOD_RWQ_IND_TBL_DESTROY)); + +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + UVERBS_METHOD_XRCD_DESTROY, + UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_XRCD_HANDLE, + UVERBS_OBJECT_XRCD, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_XRCD, UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), - uverbs_free_xrcd)); + uverbs_free_xrcd), + &UVERBS_METHOD(UVERBS_METHOD_XRCD_DESTROY)); + +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + UVERBS_METHOD_PD_DESTROY, + UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_PD_HANDLE, + UVERBS_OBJECT_PD, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_PD, - UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd)); - -DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE); - -DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects, - &UVERBS_OBJECT(UVERBS_OBJECT_DEVICE), - &UVERBS_OBJECT(UVERBS_OBJECT_PD), - &UVERBS_OBJECT(UVERBS_OBJECT_MR), - &UVERBS_OBJECT(UVERBS_OBJECT_COMP_CHANNEL), - &UVERBS_OBJECT(UVERBS_OBJECT_CQ), - &UVERBS_OBJECT(UVERBS_OBJECT_QP), - &UVERBS_OBJECT(UVERBS_OBJECT_AH), - &UVERBS_OBJECT(UVERBS_OBJECT_MW), - &UVERBS_OBJECT(UVERBS_OBJECT_SRQ), - &UVERBS_OBJECT(UVERBS_OBJECT_FLOW), - &UVERBS_OBJECT(UVERBS_OBJECT_WQ), - &UVERBS_OBJECT(UVERBS_OBJECT_RWQ_IND_TBL), - &UVERBS_OBJECT(UVERBS_OBJECT_XRCD), - &UVERBS_OBJECT(UVERBS_OBJECT_FLOW_ACTION), - &UVERBS_OBJECT(UVERBS_OBJECT_DM), - &UVERBS_OBJECT(UVERBS_OBJECT_COUNTERS)); - -const struct uverbs_object_tree_def *uverbs_default_get_objects(void) -{ - return &uverbs_default_objects; -} + UVERBS_TYPE_ALLOC_IDR(uverbs_free_pd), + &UVERBS_METHOD(UVERBS_METHOD_PD_DESTROY)); + +const struct uapi_definition uverbs_def_obj_intf[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_PD, + UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COMP_CHANNEL, + UAPI_DEF_OBJ_NEEDS_FN(dealloc_pd)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_QP, + UAPI_DEF_OBJ_NEEDS_FN(destroy_qp)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_AH, + UAPI_DEF_OBJ_NEEDS_FN(destroy_ah)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MW, + UAPI_DEF_OBJ_NEEDS_FN(dealloc_mw)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_SRQ, + UAPI_DEF_OBJ_NEEDS_FN(destroy_srq)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_FLOW, + UAPI_DEF_OBJ_NEEDS_FN(destroy_flow)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_WQ, + UAPI_DEF_OBJ_NEEDS_FN(destroy_wq)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED( + UVERBS_OBJECT_RWQ_IND_TBL, + UAPI_DEF_OBJ_NEEDS_FN(destroy_rwq_ind_table)), + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_XRCD, + UAPI_DEF_OBJ_NEEDS_FN(dealloc_xrcd)), + {} +}; diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c index a0ffdcf9a51c..309c5e80988d 100644 --- a/drivers/infiniband/core/uverbs_std_types_counters.c +++ b/drivers/infiniband/core/uverbs_std_types_counters.c @@ -44,11 +44,11 @@ static int uverbs_free_counters(struct ib_uobject *uobject, if (ret) return ret; - return counters->device->destroy_counters(counters); + return counters->device->ops.destroy_counters(counters); } static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)( - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) + struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject( attrs, UVERBS_ATTR_CREATE_COUNTERS_HANDLE); @@ -61,10 +61,10 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_CREATE)( * have the ability to remove methods from parse tree once * such condition is met. 
*/ - if (!ib_dev->create_counters) + if (!ib_dev->ops.create_counters) return -EOPNOTSUPP; - counters = ib_dev->create_counters(ib_dev, attrs); + counters = ib_dev->ops.create_counters(ib_dev, attrs); if (IS_ERR(counters)) { ret = PTR_ERR(counters); goto err_create_counters; @@ -82,7 +82,7 @@ err_create_counters: } static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)( - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) + struct uverbs_attr_bundle *attrs) { struct ib_counters_read_attr read_attr = {}; const struct uverbs_attr *uattr; @@ -90,7 +90,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)( uverbs_attr_get_obj(attrs, UVERBS_ATTR_READ_COUNTERS_HANDLE); int ret; - if (!counters->device->read_counters) + if (!counters->device->ops.read_counters) return -EOPNOTSUPP; if (!atomic_read(&counters->usecnt)) @@ -109,7 +109,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)( if (IS_ERR(read_attr.counters_buff)) return PTR_ERR(read_attr.counters_buff); - ret = counters->device->read_counters(counters, &read_attr, attrs); + ret = counters->device->ops.read_counters(counters, &read_attr, attrs); if (ret) return ret; @@ -149,3 +149,9 @@ DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_COUNTERS, &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_CREATE), &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_DESTROY), &UVERBS_METHOD(UVERBS_METHOD_COUNTERS_READ)); + +const struct uapi_definition uverbs_def_obj_counters[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_COUNTERS, + UAPI_DEF_OBJ_NEEDS_FN(destroy_counters)), + {} +}; diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c index 5b5f2052cd52..a59ea89e3f2b 100644 --- a/drivers/infiniband/core/uverbs_std_types_cq.c +++ b/drivers/infiniband/core/uverbs_std_types_cq.c @@ -58,13 +58,12 @@ static int uverbs_free_cq(struct ib_uobject *uobject, } static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) + struct uverbs_attr_bundle *attrs) { struct ib_ucq_object *obj = container_of( uverbs_attr_get_uobject(attrs, UVERBS_ATTR_CREATE_CQ_HANDLE), typeof(*obj), uobject); struct ib_device *ib_dev = obj->uobject.context->device; - struct ib_udata uhw; int ret; u64 user_handle; struct ib_cq_init_attr attr = {}; @@ -72,7 +71,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( struct ib_uverbs_completion_event_file *ev_file = NULL; struct ib_uobject *ev_file_uobj; - if (!ib_dev->create_cq || !ib_dev->destroy_cq) + if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq) return -EOPNOTSUPP; ret = uverbs_copy_from(&attr.comp_vector, attrs, @@ -101,7 +100,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( uverbs_uobject_get(ev_file_uobj); } - if (attr.comp_vector >= file->device->num_comp_vectors) { + if (attr.comp_vector >= attrs->ufile->device->num_comp_vectors) { ret = -EINVAL; goto err_event_file; } @@ -111,10 +110,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( INIT_LIST_HEAD(&obj->comp_list); INIT_LIST_HEAD(&obj->async_list); - /* Temporary, only until drivers get the new uverbs_attr_bundle */ - create_udata(attrs, &uhw); - - cq = ib_dev->create_cq(ib_dev, &attr, obj->uobject.context, &uhw); + cq = ib_dev->ops.create_cq(ib_dev, &attr, obj->uobject.context, + &attrs->driver_udata); if (IS_ERR(cq)) { ret = PTR_ERR(cq); goto err_event_file; @@ -129,7 +126,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)( obj->uobject.user_handle = user_handle; atomic_set(&cq->usecnt, 0); cq->res.type = RDMA_RESTRACK_CQ; - 
rdma_restrack_add(&cq->res); + rdma_restrack_uadd(&cq->res); ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe, sizeof(cq->cqe)); @@ -173,7 +170,7 @@ DECLARE_UVERBS_NAMED_METHOD( UVERBS_ATTR_UHW()); static int UVERBS_HANDLER(UVERBS_METHOD_CQ_DESTROY)( - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) + struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, UVERBS_ATTR_DESTROY_CQ_HANDLE); @@ -207,3 +204,9 @@ DECLARE_UVERBS_NAMED_OBJECT( &UVERBS_METHOD(UVERBS_METHOD_CQ_DESTROY) #endif ); + +const struct uapi_definition uverbs_def_obj_cq[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_CQ, + UAPI_DEF_OBJ_NEEDS_FN(destroy_cq)), + {} +}; diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c new file mode 100644 index 000000000000..5030ec480370 --- /dev/null +++ b/drivers/infiniband/core/uverbs_std_types_device.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* + * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. + */ + +#include <rdma/uverbs_std_types.h> +#include "rdma_core.h" +#include "uverbs.h" +#include <rdma/uverbs_ioctl.h> +#include <rdma/opa_addr.h> + +/* + * This ioctl method allows calling any defined write or write_ex + * handler. This essentially replaces the hdr/ex_hdr system with the ioctl + * marshalling, and brings the non-ex path into the same marshalling as the ex + * path. + */ +static int UVERBS_HANDLER(UVERBS_METHOD_INVOKE_WRITE)( + struct uverbs_attr_bundle *attrs) +{ + struct uverbs_api *uapi = attrs->ufile->device->uapi; + const struct uverbs_api_write_method *method_elm; + u32 cmd; + int rc; + + rc = uverbs_get_const(&cmd, attrs, UVERBS_ATTR_WRITE_CMD); + if (rc) + return rc; + + method_elm = uapi_get_method(uapi, cmd); + if (IS_ERR(method_elm)) + return PTR_ERR(method_elm); + + uverbs_fill_udata(attrs, &attrs->ucore, UVERBS_ATTR_CORE_IN, + UVERBS_ATTR_CORE_OUT); + + if (attrs->ucore.inlen < method_elm->req_size || + attrs->ucore.outlen < method_elm->resp_size) + return -ENOSPC; + + return method_elm->handler(attrs); +} + +DECLARE_UVERBS_NAMED_METHOD(UVERBS_METHOD_INVOKE_WRITE, + UVERBS_ATTR_CONST_IN(UVERBS_ATTR_WRITE_CMD, + enum ib_uverbs_write_cmds, + UA_MANDATORY), + UVERBS_ATTR_PTR_IN(UVERBS_ATTR_CORE_IN, + UVERBS_ATTR_MIN_SIZE(sizeof(u32)), + UA_OPTIONAL), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_CORE_OUT, + UVERBS_ATTR_MIN_SIZE(0), + UA_OPTIONAL), + UVERBS_ATTR_UHW()); + +static uint32_t * +gather_objects_handle(struct ib_uverbs_file *ufile, + const struct uverbs_api_object *uapi_object, + struct uverbs_attr_bundle *attrs, + ssize_t out_len, + u64 *total) +{ + u64 max_count = out_len / sizeof(u32); + struct ib_uobject *obj; + u64 count = 0; + u32 *handles; + + /* Allocated memory that cannot page out where we gather + * all object ids under a spin_lock. 
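/*
 * Editor's sketch -- not part of the patch: the allocate-then-lock
 * pattern gather_objects_handle() uses, shown as self-contained
 * userspace C.  The buffer is allocated before the lock is taken
 * because sleeping allocations are not allowed while the spinlock is
 * held over the uobjects list walk; here a pthread mutex and a plain
 * array stand in for the spinlock and the list.  snapshot_ids() is
 * hypothetical.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t *snapshot_ids(const uint32_t *live, size_t nlive,
			      pthread_mutex_t *lock, size_t max,
			      size_t *out_count)
{
	uint32_t *ids = calloc(max, sizeof(*ids));	/* allocate first */
	size_t n = 0;

	if (!ids)
		return NULL;
	pthread_mutex_lock(lock);			/* then take the lock */
	for (size_t i = 0; i < nlive && n < max; i++)
		ids[n++] = live[i];
	pthread_mutex_unlock(lock);
	*out_count = n;
	return ids;
}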
+ */ + handles = uverbs_zalloc(attrs, out_len); + if (IS_ERR(handles)) + return handles; + + spin_lock_irq(&ufile->uobjects_lock); + list_for_each_entry(obj, &ufile->uobjects, list) { + u32 obj_id = obj->id; + + if (obj->uapi_object != uapi_object) + continue; + + if (count >= max_count) + break; + + handles[count] = obj_id; + count++; + } + spin_unlock_irq(&ufile->uobjects_lock); + + *total = count; + return handles; +} + +static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)( + struct uverbs_attr_bundle *attrs) +{ + const struct uverbs_api_object *uapi_object; + ssize_t out_len; + u64 total = 0; + u16 object_id; + u32 *handles; + int ret; + + out_len = uverbs_attr_get_len(attrs, UVERBS_ATTR_INFO_HANDLES_LIST); + if (out_len <= 0 || (out_len % sizeof(u32) != 0)) + return -EINVAL; + + ret = uverbs_get_const(&object_id, attrs, UVERBS_ATTR_INFO_OBJECT_ID); + if (ret) + return ret; + + uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id); + if (!uapi_object) + return -EINVAL; + + handles = gather_objects_handle(attrs->ufile, uapi_object, attrs, + out_len, &total); + if (IS_ERR(handles)) + return PTR_ERR(handles); + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_HANDLES_LIST, handles, + sizeof(u32) * total); + if (ret) + goto err; + + ret = uverbs_copy_to(attrs, UVERBS_ATTR_INFO_TOTAL_HANDLES, &total, + sizeof(total)); +err: + return ret; +} + +void copy_port_attr_to_resp(struct ib_port_attr *attr, + struct ib_uverbs_query_port_resp *resp, + struct ib_device *ib_dev, u8 port_num) +{ + resp->state = attr->state; + resp->max_mtu = attr->max_mtu; + resp->active_mtu = attr->active_mtu; + resp->gid_tbl_len = attr->gid_tbl_len; + resp->port_cap_flags = make_port_cap_flags(attr); + resp->max_msg_sz = attr->max_msg_sz; + resp->bad_pkey_cntr = attr->bad_pkey_cntr; + resp->qkey_viol_cntr = attr->qkey_viol_cntr; + resp->pkey_tbl_len = attr->pkey_tbl_len; + + if (rdma_is_grh_required(ib_dev, port_num)) + resp->flags |= IB_UVERBS_QPF_GRH_REQUIRED; + + if (rdma_cap_opa_ah(ib_dev, port_num)) { + resp->lid = OPA_TO_IB_UCAST_LID(attr->lid); + resp->sm_lid = OPA_TO_IB_UCAST_LID(attr->sm_lid); + } else { + resp->lid = ib_lid_cpu16(attr->lid); + resp->sm_lid = ib_lid_cpu16(attr->sm_lid); + } + + resp->lmc = attr->lmc; + resp->max_vl_num = attr->max_vl_num; + resp->sm_sl = attr->sm_sl; + resp->subnet_timeout = attr->subnet_timeout; + resp->init_type_reply = attr->init_type_reply; + resp->active_width = attr->active_width; + resp->active_speed = attr->active_speed; + resp->phys_state = attr->phys_state; + resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num); +} + +static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( + struct uverbs_attr_bundle *attrs) +{ + struct ib_device *ib_dev = attrs->ufile->device->ib_dev; + struct ib_port_attr attr = {}; + struct ib_uverbs_query_port_resp_ex resp = {}; + int ret; + u8 port_num; + + /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. 
*/ + if (!ib_dev->ops.query_port) + return -EOPNOTSUPP; + + ret = uverbs_get_const(&port_num, attrs, + UVERBS_ATTR_QUERY_PORT_PORT_NUM); + if (ret) + return ret; + + ret = ib_query_port(ib_dev, port_num, &attr); + if (ret) + return ret; + + copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num); + resp.port_cap_flags2 = attr.port_cap_flags2; + + return uverbs_copy_to_struct_or_zero(attrs, UVERBS_ATTR_QUERY_PORT_RESP, + &resp, sizeof(resp)); +} + +DECLARE_UVERBS_NAMED_METHOD( + UVERBS_METHOD_INFO_HANDLES, + /* Also includes any device specific object ids */ + UVERBS_ATTR_CONST_IN(UVERBS_ATTR_INFO_OBJECT_ID, + enum uverbs_default_objects, UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_TOTAL_HANDLES, + UVERBS_ATTR_TYPE(u32), UA_OPTIONAL), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_INFO_HANDLES_LIST, + UVERBS_ATTR_MIN_SIZE(sizeof(u32)), UA_OPTIONAL)); + +DECLARE_UVERBS_NAMED_METHOD( + UVERBS_METHOD_QUERY_PORT, + UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_PORT_PORT_NUM, u8, UA_MANDATORY), + UVERBS_ATTR_PTR_OUT( + UVERBS_ATTR_QUERY_PORT_RESP, + UVERBS_ATTR_STRUCT(struct ib_uverbs_query_port_resp_ex, + reserved), + UA_MANDATORY)); + +DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE, + &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE), + &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES), + &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT)); + +const struct uapi_definition uverbs_def_obj_device[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE), + {}, +}; diff --git a/drivers/infiniband/core/uverbs_std_types_dm.c b/drivers/infiniband/core/uverbs_std_types_dm.c index edc3ff7733d4..2ef70637bee1 100644 --- a/drivers/infiniband/core/uverbs_std_types_dm.c +++ b/drivers/infiniband/core/uverbs_std_types_dm.c @@ -43,12 +43,11 @@ static int uverbs_free_dm(struct ib_uobject *uobject, if (ret) return ret; - return dm->device->dealloc_dm(dm); + return dm->device->ops.dealloc_dm(dm); } -static int -UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_uverbs_file *file, - struct uverbs_attr_bundle *attrs) +static int UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)( + struct uverbs_attr_bundle *attrs) { struct ib_dm_alloc_attr attr = {}; struct ib_uobject *uobj = @@ -58,7 +57,7 @@ UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_uverbs_file *file, struct ib_dm *dm; int ret; - if (!ib_dev->alloc_dm) + if (!ib_dev->ops.alloc_dm) return -EOPNOTSUPP; ret = uverbs_copy_from(&attr.length, attrs, @@ -71,7 +70,7 @@ UVERBS_HANDLER(UVERBS_METHOD_DM_ALLOC)(struct ib_uverbs_file *file, if (ret) return ret; - dm = ib_dev->alloc_dm(ib_dev, uobj->context, &attr, attrs); + dm = ib_dev->ops.alloc_dm(ib_dev, uobj->context, &attr, attrs); if (IS_ERR(dm)) return PTR_ERR(dm); @@ -109,3 +108,9 @@ DECLARE_UVERBS_NAMED_OBJECT(UVERBS_OBJECT_DM, UVERBS_TYPE_ALLOC_IDR(uverbs_free_dm), &UVERBS_METHOD(UVERBS_METHOD_DM_ALLOC), &UVERBS_METHOD(UVERBS_METHOD_DM_FREE)); + +const struct uapi_definition uverbs_def_obj_dm[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM, + UAPI_DEF_OBJ_NEEDS_FN(dealloc_dm)), + {} +}; diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c index cb9486ad5c67..4962b87fa600 100644 --- a/drivers/infiniband/core/uverbs_std_types_flow_action.c +++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c @@ -43,7 +43,7 @@ static int uverbs_free_flow_action(struct ib_uobject *uobject, if (ret) return ret; - return action->device->destroy_flow_action(action); + return action->device->ops.destroy_flow_action(action); } static u64 esp_flags_uverbs_to_verbs(struct 
uverbs_attr_bundle *attrs, @@ -223,7 +223,6 @@ struct ib_flow_action_esp_attr { #define ESP_LAST_SUPPORTED_FLAG IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW static int parse_flow_action_esp(struct ib_device *ib_dev, - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs, struct ib_flow_action_esp_attr *esp_attr, bool is_modify) @@ -305,7 +304,7 @@ static int parse_flow_action_esp(struct ib_device *ib_dev, } static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)( - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) + struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject( attrs, UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE); @@ -314,15 +313,16 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)( struct ib_flow_action *action; struct ib_flow_action_esp_attr esp_attr = {}; - if (!ib_dev->create_flow_action_esp) + if (!ib_dev->ops.create_flow_action_esp) return -EOPNOTSUPP; - ret = parse_flow_action_esp(ib_dev, file, attrs, &esp_attr, false); + ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false); if (ret) return ret; /* No need to check as this attribute is marked as MANDATORY */ - action = ib_dev->create_flow_action_esp(ib_dev, &esp_attr.hdr, attrs); + action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr, + attrs); if (IS_ERR(action)) return PTR_ERR(action); @@ -333,7 +333,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)( } static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)( - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) + struct uverbs_attr_bundle *attrs) { struct ib_uobject *uobj = uverbs_attr_get_uobject( attrs, UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE); @@ -341,19 +341,19 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)( int ret; struct ib_flow_action_esp_attr esp_attr = {}; - if (!action->device->modify_flow_action_esp) + if (!action->device->ops.modify_flow_action_esp) return -EOPNOTSUPP; - ret = parse_flow_action_esp(action->device, file, attrs, &esp_attr, - true); + ret = parse_flow_action_esp(action->device, attrs, &esp_attr, true); if (ret) return ret; if (action->type != IB_FLOW_ACTION_ESP) return -EINVAL; - return action->device->modify_flow_action_esp(action, &esp_attr.hdr, - attrs); + return action->device->ops.modify_flow_action_esp(action, + &esp_attr.hdr, + attrs); } static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = { @@ -438,3 +438,10 @@ DECLARE_UVERBS_NAMED_OBJECT( &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE), &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_DESTROY), &UVERBS_METHOD(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)); + +const struct uapi_definition uverbs_def_obj_flow_action[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED( + UVERBS_OBJECT_FLOW_ACTION, + UAPI_DEF_OBJ_NEEDS_FN(destroy_flow_action)), + {} +}; diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c index cf02e774303e..4d4be0c2b752 100644 --- a/drivers/infiniband/core/uverbs_std_types_mr.c +++ b/drivers/infiniband/core/uverbs_std_types_mr.c @@ -39,8 +39,44 @@ static int uverbs_free_mr(struct ib_uobject *uobject, return ib_dereg_mr((struct ib_mr *)uobject->object); } +static int UVERBS_HANDLER(UVERBS_METHOD_ADVISE_MR)( + struct uverbs_attr_bundle *attrs) +{ + struct ib_pd *pd = + uverbs_attr_get_obj(attrs, UVERBS_ATTR_ADVISE_MR_PD_HANDLE); + enum ib_uverbs_advise_mr_advice advice; + struct ib_device *ib_dev = pd->device; + struct ib_sge *sg_list; + int num_sge; + u32 flags; + int 
ret; + + /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ + if (!ib_dev->ops.advise_mr) + return -EOPNOTSUPP; + + ret = uverbs_get_const(&advice, attrs, UVERBS_ATTR_ADVISE_MR_ADVICE); + if (ret) + return ret; + + ret = uverbs_get_flags32(&flags, attrs, UVERBS_ATTR_ADVISE_MR_FLAGS, + IB_UVERBS_ADVISE_MR_FLAG_FLUSH); + if (ret) + return ret; + + num_sge = uverbs_attr_ptr_get_array_size( + attrs, UVERBS_ATTR_ADVISE_MR_SGE_LIST, sizeof(struct ib_sge)); + if (num_sge < 0) + return num_sge; + + sg_list = uverbs_attr_get_alloced_ptr(attrs, + UVERBS_ATTR_ADVISE_MR_SGE_LIST); + return ib_dev->ops.advise_mr(pd, advice, flags, sg_list, num_sge, + attrs); +} + static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)( - struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs) + struct uverbs_attr_bundle *attrs) { struct ib_dm_mr_attr attr = {}; struct ib_uobject *uobj = @@ -54,7 +90,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)( struct ib_mr *mr; int ret; - if (!ib_dev->reg_dm_mr) + if (!ib_dev->ops.reg_dm_mr) return -EOPNOTSUPP; ret = uverbs_copy_from(&attr.offset, attrs, UVERBS_ATTR_REG_DM_MR_OFFSET); @@ -83,7 +119,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)( attr.length > dm->length - attr.offset) return -EINVAL; - mr = pd->device->reg_dm_mr(pd, dm, &attr, attrs); + mr = pd->device->ops.reg_dm_mr(pd, dm, &attr, attrs); if (IS_ERR(mr)) return PTR_ERR(mr); @@ -115,6 +151,23 @@ err_dereg: } DECLARE_UVERBS_NAMED_METHOD( + UVERBS_METHOD_ADVISE_MR, + UVERBS_ATTR_IDR(UVERBS_ATTR_ADVISE_MR_PD_HANDLE, + UVERBS_OBJECT_PD, + UVERBS_ACCESS_READ, + UA_MANDATORY), + UVERBS_ATTR_CONST_IN(UVERBS_ATTR_ADVISE_MR_ADVICE, + enum ib_uverbs_advise_mr_advice, + UA_MANDATORY), + UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_ADVISE_MR_FLAGS, + enum ib_uverbs_advise_mr_flag, + UA_MANDATORY), + UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ADVISE_MR_SGE_LIST, + UVERBS_ATTR_MIN_SIZE(sizeof(struct ib_uverbs_sge)), + UA_MANDATORY, + UA_ALLOC_AND_COPY)); + +DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_DM_MR_REG, UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE, UVERBS_OBJECT_MR, @@ -143,7 +196,22 @@ DECLARE_UVERBS_NAMED_METHOD( UVERBS_ATTR_TYPE(u32), UA_MANDATORY)); +DECLARE_UVERBS_NAMED_METHOD_DESTROY( + UVERBS_METHOD_MR_DESTROY, + UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MR_HANDLE, + UVERBS_OBJECT_MR, + UVERBS_ACCESS_DESTROY, + UA_MANDATORY)); + DECLARE_UVERBS_NAMED_OBJECT( UVERBS_OBJECT_MR, UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr), - &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG)); + &UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG), + &UVERBS_METHOD(UVERBS_METHOD_MR_DESTROY), + &UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR)); + +const struct uapi_definition uverbs_def_obj_mr[] = { + UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MR, + UAPI_DEF_OBJ_NEEDS_FN(dereg_mr)), + {} +}; diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c index 86f3fc5e04b4..9ae08e4b78a3 100644 --- a/drivers/infiniband/core/uverbs_uapi.c +++ b/drivers/infiniband/core/uverbs_uapi.c @@ -8,6 +8,11 @@ #include "rdma_core.h" #include "uverbs.h" +static int ib_uverbs_notsupp(struct uverbs_attr_bundle *attrs) +{ + return -EOPNOTSUPP; +} + static void *uapi_add_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size) { void *elm; @@ -26,6 +31,70 @@ static void *uapi_add_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size) return elm; } +static void *uapi_add_get_elm(struct uverbs_api *uapi, u32 key, + size_t alloc_size, bool *exists) +{ + void *elm; + + elm = uapi_add_elm(uapi, key, alloc_size); + if (!IS_ERR(elm)) { + *exists = false; + return elm; + } + + if 
(elm != ERR_PTR(-EEXIST)) + return elm; + + elm = radix_tree_lookup(&uapi->radix, key); + if (WARN_ON(!elm)) + return ERR_PTR(-EINVAL); + *exists = true; + return elm; +} + +static int uapi_create_write(struct uverbs_api *uapi, + struct ib_device *ibdev, + const struct uapi_definition *def, + u32 obj_key, + u32 *cur_method_key) +{ + struct uverbs_api_write_method *method_elm; + u32 method_key = obj_key; + bool exists; + + if (def->write.is_ex) + method_key |= uapi_key_write_ex_method(def->write.command_num); + else + method_key |= uapi_key_write_method(def->write.command_num); + + method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm), + &exists); + if (IS_ERR(method_elm)) + return PTR_ERR(method_elm); + + if (WARN_ON(exists && (def->write.is_ex != method_elm->is_ex))) + return -EINVAL; + + method_elm->is_ex = def->write.is_ex; + method_elm->handler = def->func_write; + if (def->write.is_ex) + method_elm->disabled = !(ibdev->uverbs_ex_cmd_mask & + BIT_ULL(def->write.command_num)); + else + method_elm->disabled = !(ibdev->uverbs_cmd_mask & + BIT_ULL(def->write.command_num)); + + if (!def->write.is_ex && def->func_write) { + method_elm->has_udata = def->write.has_udata; + method_elm->has_resp = def->write.has_resp; + method_elm->req_size = def->write.req_size; + method_elm->resp_size = def->write.resp_size; + } + + *cur_method_key = method_key; + return 0; +} + static int uapi_merge_method(struct uverbs_api *uapi, struct uverbs_api_object *obj_elm, u32 obj_key, const struct uverbs_method_def *method, @@ -34,23 +103,21 @@ static int uapi_merge_method(struct uverbs_api *uapi, u32 method_key = obj_key | uapi_key_ioctl_method(method->id); struct uverbs_api_ioctl_method *method_elm; unsigned int i; + bool exists; if (!method->attrs) return 0; - method_elm = uapi_add_elm(uapi, method_key, sizeof(*method_elm)); - if (IS_ERR(method_elm)) { - if (method_elm != ERR_PTR(-EEXIST)) - return PTR_ERR(method_elm); - + method_elm = uapi_add_get_elm(uapi, method_key, sizeof(*method_elm), + &exists); + if (IS_ERR(method_elm)) + return PTR_ERR(method_elm); + if (exists) { /* * This occurs when a driver uses ADD_UVERBS_ATTRIBUTES_SIMPLE */ if (WARN_ON(method->handler)) return -EINVAL; - method_elm = radix_tree_lookup(&uapi->radix, method_key); - if (WARN_ON(!method_elm)) - return -EINVAL; } else { WARN_ON(!method->handler); rcu_assign_pointer(method_elm->handler, method->handler); @@ -98,72 +165,183 @@ static int uapi_merge_method(struct uverbs_api *uapi, return 0; } -static int uapi_merge_tree(struct uverbs_api *uapi, - const struct uverbs_object_tree_def *tree, - bool is_driver) +static int uapi_merge_obj_tree(struct uverbs_api *uapi, + const struct uverbs_object_def *obj, + bool is_driver) { - unsigned int i, j; + struct uverbs_api_object *obj_elm; + unsigned int i; + u32 obj_key; + bool exists; int rc; - if (!tree->objects) + obj_key = uapi_key_obj(obj->id); + obj_elm = uapi_add_get_elm(uapi, obj_key, sizeof(*obj_elm), &exists); + if (IS_ERR(obj_elm)) + return PTR_ERR(obj_elm); + + if (obj->type_attrs) { + if (WARN_ON(obj_elm->type_attrs)) + return -EINVAL; + + obj_elm->id = obj->id; + obj_elm->type_attrs = obj->type_attrs; + obj_elm->type_class = obj->type_attrs->type_class; + /* + * Today drivers are only permitted to use idr_class + * types. They cannot use FD types because we currently have + * no way to revoke the fops pointer after device + * disassociation. 
+ */ + if (WARN_ON(is_driver && + obj->type_attrs->type_class != &uverbs_idr_class)) + return -EINVAL; + } + + if (!obj->methods) return 0; - for (i = 0; i != tree->num_objects; i++) { - const struct uverbs_object_def *obj = (*tree->objects)[i]; - struct uverbs_api_object *obj_elm; - u32 obj_key; + for (i = 0; i != obj->num_methods; i++) { + const struct uverbs_method_def *method = (*obj->methods)[i]; - if (!obj) + if (!method) continue; - obj_key = uapi_key_obj(obj->id); - obj_elm = uapi_add_elm(uapi, obj_key, sizeof(*obj_elm)); - if (IS_ERR(obj_elm)) { - if (obj_elm != ERR_PTR(-EEXIST)) - return PTR_ERR(obj_elm); + rc = uapi_merge_method(uapi, obj_elm, obj_key, method, + is_driver); + if (rc) + return rc; + } - /* This occurs when a driver uses ADD_UVERBS_METHODS */ - if (WARN_ON(obj->type_attrs)) - return -EINVAL; - obj_elm = radix_tree_lookup(&uapi->radix, obj_key); - if (WARN_ON(!obj_elm)) + return 0; +} + +static int uapi_disable_elm(struct uverbs_api *uapi, + const struct uapi_definition *def, + u32 obj_key, + u32 method_key) +{ + bool exists; + + if (def->scope == UAPI_SCOPE_OBJECT) { + struct uverbs_api_object *obj_elm; + + obj_elm = uapi_add_get_elm( + uapi, obj_key, sizeof(*obj_elm), &exists); + if (IS_ERR(obj_elm)) + return PTR_ERR(obj_elm); + obj_elm->disabled = 1; + return 0; + } + + if (def->scope == UAPI_SCOPE_METHOD && + uapi_key_is_ioctl_method(method_key)) { + struct uverbs_api_ioctl_method *method_elm; + + method_elm = uapi_add_get_elm(uapi, method_key, + sizeof(*method_elm), &exists); + if (IS_ERR(method_elm)) + return PTR_ERR(method_elm); + method_elm->disabled = 1; + return 0; + } + + if (def->scope == UAPI_SCOPE_METHOD && + (uapi_key_is_write_method(method_key) || + uapi_key_is_write_ex_method(method_key))) { + struct uverbs_api_write_method *write_elm; + + write_elm = uapi_add_get_elm(uapi, method_key, + sizeof(*write_elm), &exists); + if (IS_ERR(write_elm)) + return PTR_ERR(write_elm); + write_elm->disabled = 1; + return 0; + } + + WARN_ON(true); + return -EINVAL; +} + +static int uapi_merge_def(struct uverbs_api *uapi, struct ib_device *ibdev, + const struct uapi_definition *def_list, + bool is_driver) +{ + const struct uapi_definition *def = def_list; + u32 cur_obj_key = UVERBS_API_KEY_ERR; + u32 cur_method_key = UVERBS_API_KEY_ERR; + bool exists; + int rc; + + if (!def_list) + return 0; + + for (;; def++) { + switch ((enum uapi_definition_kind)def->kind) { + case UAPI_DEF_CHAIN: + rc = uapi_merge_def(uapi, ibdev, def->chain, is_driver); + if (rc) + return rc; + continue; + + case UAPI_DEF_CHAIN_OBJ_TREE: + if (WARN_ON(def->object_start.object_id != + def->chain_obj_tree->id)) return -EINVAL; - } else { - obj_elm->type_attrs = obj->type_attrs; - if (obj->type_attrs) { - obj_elm->type_class = - obj->type_attrs->type_class; - /* - * Today drivers are only permitted to use - * idr_class types. They cannot use FD types - * because we currently have no way to revoke - * the fops pointer after device - * disassociation. 
- */ - if (WARN_ON(is_driver && - obj->type_attrs->type_class != - &uverbs_idr_class)) - return -EINVAL; - } - } - if (!obj->methods) + cur_obj_key = uapi_key_obj(def->object_start.object_id); + rc = uapi_merge_obj_tree(uapi, def->chain_obj_tree, + is_driver); + if (rc) + return rc; continue; - for (j = 0; j != obj->num_methods; j++) { - const struct uverbs_method_def *method = - (*obj->methods)[j]; - if (!method) + case UAPI_DEF_END: + return 0; + + case UAPI_DEF_IS_SUPPORTED_DEV_FN: { + void **ibdev_fn = + (void *)(&ibdev->ops) + def->needs_fn_offset; + + if (*ibdev_fn) continue; + rc = uapi_disable_elm( + uapi, def, cur_obj_key, cur_method_key); + if (rc) + return rc; + continue; + } - rc = uapi_merge_method(uapi, obj_elm, obj_key, method, - is_driver); + case UAPI_DEF_IS_SUPPORTED_FUNC: + if (def->func_is_supported(ibdev)) + continue; + rc = uapi_disable_elm( + uapi, def, cur_obj_key, cur_method_key); if (rc) return rc; + continue; + + case UAPI_DEF_OBJECT_START: { + struct uverbs_api_object *obj_elm; + + cur_obj_key = uapi_key_obj(def->object_start.object_id); + obj_elm = uapi_add_get_elm(uapi, cur_obj_key, + sizeof(*obj_elm), &exists); + if (IS_ERR(obj_elm)) + return PTR_ERR(obj_elm); + continue; } - } - return 0; + case UAPI_DEF_WRITE: + rc = uapi_create_write( + uapi, ibdev, def, cur_obj_key, &cur_method_key); + if (rc) + return rc; + continue; + } + WARN_ON(true); + return -EINVAL; + } } static int @@ -186,13 +364,16 @@ uapi_finalize_ioctl_method(struct uverbs_api *uapi, u32 attr_bkey = uapi_bkey_attr(attr_key); u8 type = elm->spec.type; - if (uapi_key_attr_to_method(iter.index) != - uapi_key_attr_to_method(method_key)) + if (uapi_key_attr_to_ioctl_method(iter.index) != + uapi_key_attr_to_ioctl_method(method_key)) break; if (elm->spec.mandatory) __set_bit(attr_bkey, method_elm->attr_mandatory); + if (elm->spec.is_udata) + method_elm->has_udata = true; + if (type == UVERBS_ATTR_TYPE_IDR || type == UVERBS_ATTR_TYPE_FD) { u8 access = elm->spec.u.obj.access; @@ -229,9 +410,13 @@ uapi_finalize_ioctl_method(struct uverbs_api *uapi, static int uapi_finalize(struct uverbs_api *uapi) { + const struct uverbs_api_write_method **data; + unsigned long max_write_ex = 0; + unsigned long max_write = 0; struct radix_tree_iter iter; void __rcu **slot; int rc; + int i; radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) { struct uverbs_api_ioctl_method *method_elm = @@ -243,29 +428,209 @@ static int uapi_finalize(struct uverbs_api *uapi) if (rc) return rc; } + + if (uapi_key_is_write_method(iter.index)) + max_write = max(max_write, + iter.index & UVERBS_API_ATTR_KEY_MASK); + if (uapi_key_is_write_ex_method(iter.index)) + max_write_ex = + max(max_write_ex, + iter.index & UVERBS_API_ATTR_KEY_MASK); + } + + uapi->notsupp_method.handler = ib_uverbs_notsupp; + uapi->num_write = max_write + 1; + uapi->num_write_ex = max_write_ex + 1; + data = kmalloc_array(uapi->num_write + uapi->num_write_ex, + sizeof(*uapi->write_methods), GFP_KERNEL); + for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++) + data[i] = &uapi->notsupp_method; + uapi->write_methods = data; + uapi->write_ex_methods = data + uapi->num_write; + + radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) { + if (uapi_key_is_write_method(iter.index)) + uapi->write_methods[iter.index & + UVERBS_API_ATTR_KEY_MASK] = + rcu_dereference_protected(*slot, true); + if (uapi_key_is_write_ex_method(iter.index)) + uapi->write_ex_methods[iter.index & + UVERBS_API_ATTR_KEY_MASK] = + rcu_dereference_protected(*slot, true); } return 0; } -void 
uverbs_destroy_api(struct uverbs_api *uapi) +static void uapi_remove_range(struct uverbs_api *uapi, u32 start, u32 last) { struct radix_tree_iter iter; void __rcu **slot; - if (!uapi) - return; - - radix_tree_for_each_slot (slot, &uapi->radix, &iter, 0) { + radix_tree_for_each_slot (slot, &uapi->radix, &iter, start) { + if (iter.index > last) + return; kfree(rcu_dereference_protected(*slot, true)); radix_tree_iter_delete(&uapi->radix, &iter, slot); } +} + +static void uapi_remove_object(struct uverbs_api *uapi, u32 obj_key) +{ + uapi_remove_range(uapi, obj_key, + obj_key | UVERBS_API_METHOD_KEY_MASK | + UVERBS_API_ATTR_KEY_MASK); +} + +static void uapi_remove_method(struct uverbs_api *uapi, u32 method_key) +{ + uapi_remove_range(uapi, method_key, + method_key | UVERBS_API_ATTR_KEY_MASK); +} + + +static u32 uapi_get_obj_id(struct uverbs_attr_spec *spec) +{ + if (spec->type == UVERBS_ATTR_TYPE_IDR || + spec->type == UVERBS_ATTR_TYPE_FD) + return spec->u.obj.obj_type; + if (spec->type == UVERBS_ATTR_TYPE_IDRS_ARRAY) + return spec->u2.objs_arr.obj_type; + return UVERBS_API_KEY_ERR; +} + +static void uapi_key_okay(u32 key) +{ + unsigned int count = 0; + + if (uapi_key_is_object(key)) + count++; + if (uapi_key_is_ioctl_method(key)) + count++; + if (uapi_key_is_write_method(key)) + count++; + if (uapi_key_is_write_ex_method(key)) + count++; + if (uapi_key_is_attr(key)) + count++; + WARN(count != 1, "Bad count %d key=%x", count, key); +} + +static void uapi_finalize_disable(struct uverbs_api *uapi) +{ + struct radix_tree_iter iter; + u32 starting_key = 0; + bool scan_again = false; + void __rcu **slot; + +again: + radix_tree_for_each_slot (slot, &uapi->radix, &iter, starting_key) { + uapi_key_okay(iter.index); + + if (uapi_key_is_object(iter.index)) { + struct uverbs_api_object *obj_elm = + rcu_dereference_protected(*slot, true); + + if (obj_elm->disabled) { + /* Have to check all the attrs again */ + scan_again = true; + starting_key = iter.index; + uapi_remove_object(uapi, iter.index); + goto again; + } + continue; + } + + if (uapi_key_is_ioctl_method(iter.index)) { + struct uverbs_api_ioctl_method *method_elm = + rcu_dereference_protected(*slot, true); + + if (method_elm->disabled) { + starting_key = iter.index; + uapi_remove_method(uapi, iter.index); + goto again; + } + continue; + } + + if (uapi_key_is_write_method(iter.index) || + uapi_key_is_write_ex_method(iter.index)) { + struct uverbs_api_write_method *method_elm = + rcu_dereference_protected(*slot, true); + + if (method_elm->disabled) { + kfree(method_elm); + radix_tree_iter_delete(&uapi->radix, &iter, slot); + } + continue; + } + + if (uapi_key_is_attr(iter.index)) { + struct uverbs_api_attr *attr_elm = + rcu_dereference_protected(*slot, true); + const struct uverbs_api_object *tmp_obj; + u32 obj_key; + + /* + * If the method has a mandatory object handle + * attribute which relies on an object which is not + * present then the entire method is uncallable. 
+ */ + if (!attr_elm->spec.mandatory) + continue; + obj_key = uapi_get_obj_id(&attr_elm->spec); + if (obj_key == UVERBS_API_KEY_ERR) + continue; + tmp_obj = uapi_get_object(uapi, obj_key); + if (IS_ERR(tmp_obj)) { + if (PTR_ERR(tmp_obj) == -ENOMSG) + continue; + } else { + if (!tmp_obj->disabled) + continue; + } + + starting_key = iter.index; + uapi_remove_method( + uapi, + iter.index & (UVERBS_API_OBJ_KEY_MASK | + UVERBS_API_METHOD_KEY_MASK)); + goto again; + } + + WARN_ON(false); + } + + if (!scan_again) + return; + scan_again = false; + starting_key = 0; + goto again; +} + +void uverbs_destroy_api(struct uverbs_api *uapi) +{ + if (!uapi) + return; + + uapi_remove_range(uapi, 0, U32_MAX); + kfree(uapi->write_methods); kfree(uapi); } -struct uverbs_api *uverbs_alloc_api( - const struct uverbs_object_tree_def *const *driver_specs, - enum rdma_driver_id driver_id) +static const struct uapi_definition uverbs_core_api[] = { + UAPI_DEF_CHAIN(uverbs_def_obj_counters), + UAPI_DEF_CHAIN(uverbs_def_obj_cq), + UAPI_DEF_CHAIN(uverbs_def_obj_device), + UAPI_DEF_CHAIN(uverbs_def_obj_dm), + UAPI_DEF_CHAIN(uverbs_def_obj_flow_action), + UAPI_DEF_CHAIN(uverbs_def_obj_intf), + UAPI_DEF_CHAIN(uverbs_def_obj_mr), + UAPI_DEF_CHAIN(uverbs_def_write_intf), + {}, +}; + +struct uverbs_api *uverbs_alloc_api(struct ib_device *ibdev) { struct uverbs_api *uapi; int rc; @@ -275,18 +640,16 @@ struct uverbs_api *uverbs_alloc_api( return ERR_PTR(-ENOMEM); INIT_RADIX_TREE(&uapi->radix, GFP_KERNEL); - uapi->driver_id = driver_id; + uapi->driver_id = ibdev->driver_id; - rc = uapi_merge_tree(uapi, uverbs_default_get_objects(), false); + rc = uapi_merge_def(uapi, ibdev, uverbs_core_api, false); + if (rc) + goto err; + rc = uapi_merge_def(uapi, ibdev, ibdev->driver_def, true); if (rc) goto err; - for (; driver_specs && *driver_specs; driver_specs++) { - rc = uapi_merge_tree(uapi, *driver_specs, true); - if (rc) - goto err; - } - + uapi_finalize_disable(uapi); rc = uapi_finalize(uapi); if (rc) goto err; @@ -294,8 +657,9 @@ struct uverbs_api *uverbs_alloc_api( return uapi; err: if (rc != -ENOMEM) - pr_err("Setup of uverbs_api failed, kernel parsing tree description is not valid (%d)??\n", - rc); + dev_err(&ibdev->dev, + "Setup of uverbs_api failed, kernel parsing tree description is not valid (%d)??\n", + rc); uverbs_destroy_api(uapi); return ERR_PTR(rc); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 178899e3ce73..ac011836bb54 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -141,6 +141,10 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) case IB_RATE_100_GBPS: return 40; case IB_RATE_200_GBPS: return 80; case IB_RATE_300_GBPS: return 120; + case IB_RATE_28_GBPS: return 11; + case IB_RATE_50_GBPS: return 20; + case IB_RATE_400_GBPS: return 160; + case IB_RATE_600_GBPS: return 240; default: return -1; } } @@ -166,6 +170,10 @@ __attribute_const__ enum ib_rate mult_to_ib_rate(int mult) case 40: return IB_RATE_100_GBPS; case 80: return IB_RATE_200_GBPS; case 120: return IB_RATE_300_GBPS; + case 11: return IB_RATE_28_GBPS; + case 20: return IB_RATE_50_GBPS; + case 160: return IB_RATE_400_GBPS; + case 240: return IB_RATE_600_GBPS; default: return IB_RATE_PORT_CURRENT; } } @@ -191,6 +199,10 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate) case IB_RATE_100_GBPS: return 103125; case IB_RATE_200_GBPS: return 206250; case IB_RATE_300_GBPS: return 309375; + case IB_RATE_28_GBPS: return 28125; + case IB_RATE_50_GBPS: return 53125; + case 
IB_RATE_400_GBPS: return 425000; + case IB_RATE_600_GBPS: return 637500; default: return -1; } } @@ -214,8 +226,8 @@ EXPORT_SYMBOL(rdma_node_get_transport); enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) { enum rdma_transport_type lt; - if (device->get_link_layer) - return device->get_link_layer(device, port_num); + if (device->ops.get_link_layer) + return device->ops.get_link_layer(device, port_num); lt = rdma_node_get_transport(device->node_type); if (lt == RDMA_TRANSPORT_IB) @@ -243,7 +255,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, struct ib_pd *pd; int mr_access_flags = 0; - pd = device->alloc_pd(device, NULL, NULL); + pd = device->ops.alloc_pd(device, NULL, NULL); if (IS_ERR(pd)) return pd; @@ -265,12 +277,12 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags, pd->res.type = RDMA_RESTRACK_PD; rdma_restrack_set_task(&pd->res, caller); - rdma_restrack_add(&pd->res); + rdma_restrack_kadd(&pd->res); if (mr_access_flags) { struct ib_mr *mr; - mr = pd->device->get_dma_mr(pd, mr_access_flags); + mr = pd->device->ops.get_dma_mr(pd, mr_access_flags); if (IS_ERR(mr)) { ib_dealloc_pd(pd); return ERR_CAST(mr); @@ -307,7 +319,7 @@ void ib_dealloc_pd(struct ib_pd *pd) int ret; if (pd->__internal_mr) { - ret = pd->device->dereg_mr(pd->__internal_mr); + ret = pd->device->ops.dereg_mr(pd->__internal_mr); WARN_ON(ret); pd->__internal_mr = NULL; } @@ -319,7 +331,7 @@ void ib_dealloc_pd(struct ib_pd *pd) rdma_restrack_del(&pd->res); /* Making delalloc_pd a void return is a WIP, no driver should return an error here. */ - ret = pd->device->dealloc_pd(pd); + ret = pd->device->ops.dealloc_pd(pd); WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd"); } EXPORT_SYMBOL(ib_dealloc_pd); @@ -475,14 +487,17 @@ rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr, static struct ib_ah *_rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr, + u32 flags, struct ib_udata *udata) { struct ib_ah *ah; - if (!pd->device->create_ah) + might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE); + + if (!pd->device->ops.create_ah) return ERR_PTR(-EOPNOTSUPP); - ah = pd->device->create_ah(pd, ah_attr, udata); + ah = pd->device->ops.create_ah(pd, ah_attr, flags, udata); if (!IS_ERR(ah)) { ah->device = pd->device; @@ -502,12 +517,14 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd, * given address vector. * @pd: The protection domain associated with the address handle. * @ah_attr: The attributes of the address vector. + * @flags: Create address handle flags (see enum rdma_create_ah_flags). * * It returns 0 on success and returns appropriate error code on error. * The address handle is used to reference a local or global destination * in all UD QP post sends. 
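/*
 * Editor's sketch -- not part of the patch: how a caller chooses the
 * new flags argument documented above.  A caller that may be in a
 * context that cannot sleep passes 0, while one that can sleep passes
 * RDMA_CREATE_AH_SLEEPABLE, as ib_create_ah_from_wc() below now does;
 * _rdma_create_ah() enforces the contract with might_sleep_if().
 * make_ah_example() and can_sleep are hypothetical.
 */
static struct ib_ah *make_ah_example(struct ib_pd *pd,
				     struct rdma_ah_attr *ah_attr,
				     bool can_sleep)
{
	return rdma_create_ah(pd, ah_attr,
			      can_sleep ? RDMA_CREATE_AH_SLEEPABLE : 0);
}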
@@ -502,12 +517,14 @@ static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
  * given address vector.
  * @pd: The protection domain associated with the address handle.
  * @ah_attr: The attributes of the address vector.
+ * @flags: Create address handle flags (see enum rdma_create_ah_flags).
  *
  * It returns 0 on success and returns appropriate error code on error.
  * The address handle is used to reference a local or global destination
  * in all UD QP post sends.
  */
-struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
+struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
+			     u32 flags)
 {
 	const struct ib_gid_attr *old_sgid_attr;
 	struct ib_ah *ah;
@@ -517,7 +534,7 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
 	if (ret)
 		return ERR_PTR(ret);
-	ah = _rdma_create_ah(pd, ah_attr, NULL);
+	ah = _rdma_create_ah(pd, ah_attr, flags, NULL);
 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
 	return ah;
@@ -557,7 +574,7 @@ struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
 		}
 	}
-	ah = _rdma_create_ah(pd, ah_attr, udata);
+	ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE, udata);
 out:
 	rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
@@ -869,7 +886,7 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
 	if (ret)
 		return ERR_PTR(ret);
-	ah = rdma_create_ah(pd, &ah_attr);
+	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
 	rdma_destroy_ah_attr(&ah_attr);
 	return ah;
@@ -888,8 +905,8 @@ int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
 	if (ret)
 		return ret;
-	ret = ah->device->modify_ah ?
-		ah->device->modify_ah(ah, ah_attr) :
+	ret = ah->device->ops.modify_ah ?
+		ah->device->ops.modify_ah(ah, ah_attr) :
 		-EOPNOTSUPP;
 	ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
@@ -902,20 +919,22 @@ int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
 {
 	ah_attr->grh.sgid_attr = NULL;
-	return ah->device->query_ah ?
-		ah->device->query_ah(ah, ah_attr) :
+	return ah->device->ops.query_ah ?
+		ah->device->ops.query_ah(ah, ah_attr) :
 		-EOPNOTSUPP;
 }
 EXPORT_SYMBOL(rdma_query_ah);
-int rdma_destroy_ah(struct ib_ah *ah)
+int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
 {
 	const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
 	struct ib_pd *pd;
 	int ret;
+	might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
+
 	pd = ah->pd;
-	ret = ah->device->destroy_ah(ah);
+	ret = ah->device->ops.destroy_ah(ah, flags);
 	if (!ret) {
 		atomic_dec(&pd->usecnt);
 		if (sgid_attr)
@@ -933,10 +952,10 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
 {
 	struct ib_srq *srq;
-	if (!pd->device->create_srq)
+	if (!pd->device->ops.create_srq)
 		return ERR_PTR(-EOPNOTSUPP);
-	srq = pd->device->create_srq(pd, srq_init_attr, NULL);
+	srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);
 	if (!IS_ERR(srq)) {
 		srq->device = pd->device;
@@ -965,17 +984,17 @@ int ib_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
 		  enum ib_srq_attr_mask srq_attr_mask)
 {
-	return srq->device->modify_srq ?
-		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
-		-EOPNOTSUPP;
+	return srq->device->ops.modify_srq ?
+		srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
+					    NULL) : -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(ib_modify_srq);
 int ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 {
-	return srq->device->query_srq ?
-		srq->device->query_srq(srq, srq_attr) : -EOPNOTSUPP;
+	return srq->device->ops.query_srq ?
+		srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(ib_query_srq);
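rdma_create_ah() and rdma_destroy_ah() now carry a flags word so the core knows whether the caller may sleep; rdma_create_user_ah() and ib_create_ah_from_wc() above pass RDMA_CREATE_AH_SLEEPABLE, and might_sleep_if() catches a sleepable flag claimed from atomic context. A minimal sketch of the two caller situations; the foo_* wrappers are illustrative only:

    /* Process context: sleeping is fine, so say so. */
    static struct ib_ah *foo_create_ah(struct ib_pd *pd, struct rdma_ah_attr *attr)
    {
            return rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
    }

    /* Completion-handler (atomic) context: pass 0 so the driver takes a
     * non-sleeping destroy path and might_sleep_if() stays quiet. */
    static void foo_put_ah_atomic(struct ib_ah *ah)
    {
            rdma_destroy_ah(ah, 0);
    }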
@@ -997,7 +1016,7 @@ int ib_destroy_srq(struct ib_srq *srq)
 	if (srq_type == IB_SRQT_XRC)
 		xrcd = srq->ext.xrc.xrcd;
-	ret = srq->device->destroy_srq(srq);
+	ret = srq->device->ops.destroy_srq(srq);
 	if (!ret) {
 		atomic_dec(&pd->usecnt);
 		if (srq_type == IB_SRQT_XRC)
@@ -1106,7 +1125,7 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
 	if (!IS_ERR(qp))
 		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
 	else
-		real_qp->device->destroy_qp(real_qp);
+		real_qp->device->ops.destroy_qp(real_qp);
 	return qp;
 }
@@ -1692,10 +1711,10 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
 	if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
 		return -EINVAL;
-	if (!dev->get_netdev)
+	if (!dev->ops.get_netdev)
 		return -EOPNOTSUPP;
-	netdev = dev->get_netdev(dev, port_num);
+	netdev = dev->ops.get_netdev(dev, port_num);
 	if (!netdev)
 		return -ENODEV;
@@ -1753,9 +1772,9 @@ int ib_query_qp(struct ib_qp *qp,
 	qp_attr->ah_attr.grh.sgid_attr = NULL;
 	qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
-	return qp->device->query_qp ?
-		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
-		-EOPNOTSUPP;
+	return qp->device->ops.query_qp ?
+		qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
+					 qp_init_attr) : -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(ib_query_qp);
@@ -1841,7 +1860,7 @@ int ib_destroy_qp(struct ib_qp *qp)
 		rdma_rw_cleanup_mrs(qp);
 	rdma_restrack_del(&qp->res);
-	ret = qp->device->destroy_qp(qp);
+	ret = qp->device->ops.destroy_qp(qp);
 	if (!ret) {
 		if (alt_path_sgid_attr)
 			rdma_put_gid_attr(alt_path_sgid_attr);
@@ -1879,7 +1898,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
 {
 	struct ib_cq *cq;
-	cq = device->create_cq(device, cq_attr, NULL, NULL);
+	cq = device->ops.create_cq(device, cq_attr, NULL, NULL);
 	if (!IS_ERR(cq)) {
 		cq->device = device;
@@ -1890,7 +1909,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
 		atomic_set(&cq->usecnt, 0);
 		cq->res.type = RDMA_RESTRACK_CQ;
 		rdma_restrack_set_task(&cq->res, caller);
-		rdma_restrack_add(&cq->res);
+		rdma_restrack_kadd(&cq->res);
 	}
 	return cq;
@@ -1899,8 +1918,9 @@ EXPORT_SYMBOL(__ib_create_cq);
 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
-	return cq->device->modify_cq ?
-		cq->device->modify_cq(cq, cq_count, cq_period) : -EOPNOTSUPP;
+	return cq->device->ops.modify_cq ?
+		cq->device->ops.modify_cq(cq, cq_count,
+					  cq_period) : -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(rdma_set_cq_moderation);
@@ -1910,14 +1930,14 @@ int ib_destroy_cq(struct ib_cq *cq)
 		return -EBUSY;
 	rdma_restrack_del(&cq->res);
-	return cq->device->destroy_cq(cq);
+	return cq->device->ops.destroy_cq(cq);
 }
 EXPORT_SYMBOL(ib_destroy_cq);
 int ib_resize_cq(struct ib_cq *cq, int cqe)
 {
-	return cq->device->resize_cq ?
-		cq->device->resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
+	return cq->device->ops.resize_cq ?
+		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(ib_resize_cq);
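All of these call sites now go through device->ops, the ib_device_ops structure this series introduces so a driver can hand the core a single static const table instead of open-coding dozens of function-pointer assignments. A hedged sketch of the driver side, assuming the ib_set_device_ops() helper added by the same series; the foo_* driver and its handlers are hypothetical:

    /* Hypothetical driver: entry points collected in one constant table. */
    static const struct ib_device_ops foo_dev_ops = {
            .alloc_pd   = foo_alloc_pd,
            .dealloc_pd = foo_dealloc_pd,
            .create_cq  = foo_create_cq,
            .destroy_cq = foo_destroy_cq,
            .query_qp   = foo_query_qp,
    };

    static void foo_fill_ops(struct ib_device *ibdev)
    {
            /* Copies the non-NULL members into ibdev->ops. */
            ib_set_device_ops(ibdev, &foo_dev_ops);
    }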
@@ -1930,7 +1950,7 @@ int ib_dereg_mr(struct ib_mr *mr)
 	int ret;
 	rdma_restrack_del(&mr->res);
-	ret = mr->device->dereg_mr(mr);
+	ret = mr->device->ops.dereg_mr(mr);
 	if (!ret) {
 		atomic_dec(&pd->usecnt);
 		if (dm)
@@ -1959,10 +1979,10 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
 {
 	struct ib_mr *mr;
-	if (!pd->device->alloc_mr)
+	if (!pd->device->ops.alloc_mr)
 		return ERR_PTR(-EOPNOTSUPP);
-	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
+	mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
 	if (!IS_ERR(mr)) {
 		mr->device = pd->device;
 		mr->pd = pd;
@@ -1971,7 +1991,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
 		atomic_inc(&pd->usecnt);
 		mr->need_inval = false;
 		mr->res.type = RDMA_RESTRACK_MR;
-		rdma_restrack_add(&mr->res);
+		rdma_restrack_kadd(&mr->res);
 	}
 	return mr;
@@ -1986,10 +2006,10 @@ struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
 {
 	struct ib_fmr *fmr;
-	if (!pd->device->alloc_fmr)
+	if (!pd->device->ops.alloc_fmr)
 		return ERR_PTR(-EOPNOTSUPP);
-	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
+	fmr = pd->device->ops.alloc_fmr(pd, mr_access_flags, fmr_attr);
 	if (!IS_ERR(fmr)) {
 		fmr->device = pd->device;
 		fmr->pd = pd;
@@ -2008,7 +2028,7 @@ int ib_unmap_fmr(struct list_head *fmr_list)
 		return 0;
 	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
-	return fmr->device->unmap_fmr(fmr_list);
+	return fmr->device->ops.unmap_fmr(fmr_list);
 }
 EXPORT_SYMBOL(ib_unmap_fmr);
@@ -2018,7 +2038,7 @@ int ib_dealloc_fmr(struct ib_fmr *fmr)
 	int ret;
 	pd = fmr->pd;
-	ret = fmr->device->dealloc_fmr(fmr);
+	ret = fmr->device->ops.dealloc_fmr(fmr);
 	if (!ret)
 		atomic_dec(&pd->usecnt);
@@ -2070,14 +2090,14 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 {
 	int ret;
-	if (!qp->device->attach_mcast)
+	if (!qp->device->ops.attach_mcast)
 		return -EOPNOTSUPP;
 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
 		return -EINVAL;
-	ret = qp->device->attach_mcast(qp, gid, lid);
+	ret = qp->device->ops.attach_mcast(qp, gid, lid);
 	if (!ret)
 		atomic_inc(&qp->usecnt);
 	return ret;
@@ -2088,14 +2108,14 @@ int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
 {
 	int ret;
-	if (!qp->device->detach_mcast)
+	if (!qp->device->ops.detach_mcast)
 		return -EOPNOTSUPP;
 	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
 	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
 		return -EINVAL;
-	ret = qp->device->detach_mcast(qp, gid, lid);
+	ret = qp->device->ops.detach_mcast(qp, gid, lid);
 	if (!ret)
 		atomic_dec(&qp->usecnt);
 	return ret;
@@ -2106,10 +2126,10 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller)
 {
 	struct ib_xrcd *xrcd;
-	if (!device->alloc_xrcd)
+	if (!device->ops.alloc_xrcd)
 		return ERR_PTR(-EOPNOTSUPP);
-	xrcd = device->alloc_xrcd(device, NULL, NULL);
+	xrcd = device->ops.alloc_xrcd(device, NULL, NULL);
 	if (!IS_ERR(xrcd)) {
 		xrcd->device = device;
 		xrcd->inode = NULL;
@@ -2137,7 +2157,7 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
 			return ret;
 	}
-	return xrcd->device->dealloc_xrcd(xrcd);
+	return xrcd->device->ops.dealloc_xrcd(xrcd);
 }
 EXPORT_SYMBOL(ib_dealloc_xrcd);
@@ -2160,10 +2180,10 @@ struct ib_wq *ib_create_wq(struct ib_pd *pd,
 {
 	struct ib_wq *wq;
-	if (!pd->device->create_wq)
+	if (!pd->device->ops.create_wq)
 		return ERR_PTR(-EOPNOTSUPP);
-	wq = pd->device->create_wq(pd, wq_attr, NULL);
+	wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
 	if (!IS_ERR(wq)) {
 		wq->event_handler = wq_attr->event_handler;
 		wq->wq_context = wq_attr->wq_context;
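The multicast hunks above keep the existing sanity checks: only UD QPs may attach, the GID must be a multicast address, and the LID must be valid for the QP. A small sketch of a kernel consumer honouring those checks before calling in; the foo_* name is illustrative and the mgid/mlid values would come from an SA multicast join in practice:

    /* Attach a UD QP to a multicast group the caller has already joined. */
    static int foo_mcast_attach(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
    {
            if (qp->qp_type != IB_QPT_UD)
                    return -EINVAL;         /* mirrors the check in ib_attach_mcast() */

            return ib_attach_mcast(qp, mgid, mlid);
    }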
@@ -2193,7 +2213,7 @@ int ib_destroy_wq(struct ib_wq *wq)
 	if (atomic_read(&wq->usecnt))
 		return -EBUSY;
-	err = wq->device->destroy_wq(wq);
+	err = wq->device->ops.destroy_wq(wq);
 	if (!err) {
 		atomic_dec(&pd->usecnt);
 		atomic_dec(&cq->usecnt);
@@ -2215,10 +2235,10 @@ int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 {
 	int err;
-	if (!wq->device->modify_wq)
+	if (!wq->device->ops.modify_wq)
 		return -EOPNOTSUPP;
-	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
+	err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
 	return err;
 }
 EXPORT_SYMBOL(ib_modify_wq);
@@ -2240,12 +2260,12 @@ struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
 	int i;
 	u32 table_size;
-	if (!device->create_rwq_ind_table)
+	if (!device->ops.create_rwq_ind_table)
 		return ERR_PTR(-EOPNOTSUPP);
 	table_size = (1 << init_attr->log_ind_tbl_size);
-	rwq_ind_table = device->create_rwq_ind_table(device,
-						     init_attr, NULL);
+	rwq_ind_table = device->ops.create_rwq_ind_table(device,
+							 init_attr, NULL);
 	if (IS_ERR(rwq_ind_table))
 		return rwq_ind_table;
@@ -2275,7 +2295,7 @@ int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
 	if (atomic_read(&rwq_ind_table->usecnt))
 		return -EBUSY;
-	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
+	err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
 	if (!err) {
 		for (i = 0; i < table_size; i++)
 			atomic_dec(&ind_tbl[i]->usecnt);
@@ -2288,48 +2308,50 @@ EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
 int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, struct ib_mr_status *mr_status)
 {
-	return mr->device->check_mr_status ?
-		mr->device->check_mr_status(mr, check_mask, mr_status) : -EOPNOTSUPP;
+	if (!mr->device->ops.check_mr_status)
+		return -EOPNOTSUPP;
+
+	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
 }
 EXPORT_SYMBOL(ib_check_mr_status);
 int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, int state)
 {
-	if (!device->set_vf_link_state)
+	if (!device->ops.set_vf_link_state)
 		return -EOPNOTSUPP;
-	return device->set_vf_link_state(device, vf, port, state);
+	return device->ops.set_vf_link_state(device, vf, port, state);
 }
 EXPORT_SYMBOL(ib_set_vf_link_state);
 int ib_get_vf_config(struct ib_device *device, int vf, u8 port, struct ifla_vf_info *info)
 {
-	if (!device->get_vf_config)
+	if (!device->ops.get_vf_config)
 		return -EOPNOTSUPP;
-	return device->get_vf_config(device, vf, port, info);
+	return device->ops.get_vf_config(device, vf, port, info);
 }
 EXPORT_SYMBOL(ib_get_vf_config);
 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port, struct ifla_vf_stats *stats)
 {
-	if (!device->get_vf_stats)
+	if (!device->ops.get_vf_stats)
 		return -EOPNOTSUPP;
-	return device->get_vf_stats(device, vf, port, stats);
+	return device->ops.get_vf_stats(device, vf, port, stats);
 }
 EXPORT_SYMBOL(ib_get_vf_stats);
 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid, int type)
 {
-	if (!device->set_vf_guid)
+	if (!device->ops.set_vf_guid)
 		return -EOPNOTSUPP;
-	return device->set_vf_guid(device, vf, port, guid, type);
+	return device->ops.set_vf_guid(device, vf, port, guid, type);
 }
 EXPORT_SYMBOL(ib_set_vf_guid);
@@ -2361,12 +2383,12 @@ EXPORT_SYMBOL(ib_set_vf_guid);
 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
 		 unsigned int *sg_offset, unsigned int page_size)
 {
-	if (unlikely(!mr->device->map_mr_sg))
+	if (unlikely(!mr->device->ops.map_mr_sg))
 		return -EOPNOTSUPP;
 	mr->page_size = page_size;
-	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
+	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
 }
 EXPORT_SYMBOL(ib_map_mr_sg);
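ib_map_mr_sg() above is the fast-registration path: after ib_alloc_mr(), the DMA-mapped scatterlist is loaded into the MR's page list at the chosen page_size, and the caller then posts an IB_WR_REG_MR work request. A minimal sketch of the usual calling pattern, with error handling trimmed and the foo_* wrapper being illustrative only:

    /* Map sg_nents DMA-mapped entries through a freshly allocated fast-reg MR. */
    static struct ib_mr *foo_fast_reg(struct ib_pd *pd, struct scatterlist *sg,
                                      int sg_nents)
    {
            struct ib_mr *mr;
            int n;

            mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
            if (IS_ERR(mr))
                    return mr;

            n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
            if (n != sg_nents) {                    /* fewer entries mapped than asked for */
                    ib_dereg_mr(mr);
                    return ERR_PTR(n < 0 ? n : -EINVAL);
            }
            return mr;                              /* caller posts IB_WR_REG_MR next */
    }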
@@ -2565,8 +2587,8 @@ static void __ib_drain_rq(struct ib_qp *qp)
  */
 void ib_drain_sq(struct ib_qp *qp)
 {
-	if (qp->device->drain_sq)
-		qp->device->drain_sq(qp);
+	if (qp->device->ops.drain_sq)
+		qp->device->ops.drain_sq(qp);
 	else
 		__ib_drain_sq(qp);
 }
@@ -2593,8 +2615,8 @@ EXPORT_SYMBOL(ib_drain_sq);
  */
 void ib_drain_rq(struct ib_qp *qp)
 {
-	if (qp->device->drain_rq)
-		qp->device->drain_rq(qp);
+	if (qp->device->ops.drain_rq)
+		qp->device->ops.drain_rq(qp);
 	else
 		__ib_drain_rq(qp);
 }
@@ -2632,10 +2654,11 @@ struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
 	struct net_device *netdev;
 	int rc;
-	if (!device->rdma_netdev_get_params)
+	if (!device->ops.rdma_netdev_get_params)
 		return ERR_PTR(-EOPNOTSUPP);
-	rc = device->rdma_netdev_get_params(device, port_num, type, &params);
+	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
+						&params);
 	if (rc)
 		return ERR_PTR(rc);
@@ -2657,10 +2680,11 @@ int rdma_init_netdev(struct ib_device *device, u8 port_num,
 	struct rdma_netdev_alloc_params params;
 	int rc;
-	if (!device->rdma_netdev_get_params)
+	if (!device->ops.rdma_netdev_get_params)
 		return -EOPNOTSUPP;
-	rc = device->rdma_netdev_get_params(device, port_num, type, &params);
+	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
+						&params);
 	if (rc)
 		return rc;
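ib_drain_sq() and ib_drain_rq() now prefer a driver-supplied drain op and only fall back to the generic __ib_drain_*() implementations. Most kernel consumers simply drain both queues through ib_drain_qp() before tearing a QP down; a minimal sketch, assuming the QP's CQs use a polling context the generic drain path supports and that no new work is posted concurrently:

    /* Quiesce and destroy a kernel QP. */
    static void foo_teardown_qp(struct ib_qp *qp)
    {
            ib_drain_qp(qp);        /* flushes send and receive queues */
            ib_destroy_qp(qp);
    }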