author     Joel Stanley <joel@jms.id.au>    2020-08-04 10:58:32 +0300
committer  Joel Stanley <joel@jms.id.au>    2020-08-04 10:58:36 +0300
commit     9524588381df213554a10d3661a9200a4b7f6db2 (patch)
tree       a4d281b99dcd28b9a1d3077299c26a5671abff88 /drivers/infiniband
parent     666e403461fd046b37bc95bba49e82ca6a758bea (diff)
parent     67da9e2c2b730b9b788ace749d22d769cf11ee2b (diff)
download   linux-dev-5.7.tar.xz
Merge tag 'v5.7.12' into dev-5.7
This is the 5.7.12 stable release
Signed-off-by: Joel Stanley <joel@jms.id.au>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c         |  2
-rw-r--r--  drivers/infiniband/core/rdma_core.c  |  6
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c     | 22
-rw-r--r--  drivers/infiniband/hw/mlx5/srq_cmd.c |  4
4 files changed, 26 insertions, 8 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1c2bf18cda9f..83b66757c7ae 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3723,10 +3723,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
 		return ret;
 	}
 	cm_id_priv->id.state = IB_CM_IDLE;
+	spin_lock_irq(&cm.lock);
 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
 		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
 	}
+	spin_unlock_irq(&cm.lock);
 
 	return 0;
 }
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 75bcbc625616..3ab84fcbaade 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -638,9 +638,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 {
 	struct ib_uverbs_file *ufile = attrs->ufile;
 
-	/* alloc_commit consumes the uobj kref */
-	uobj->uapi_object->type_class->alloc_commit(uobj);
-
 	/* kref is held so long as the uobj is on the uobj list. */
 	uverbs_uobject_get(uobj);
 	spin_lock_irq(&ufile->uobjects_lock);
@@ -650,6 +647,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
 	/* matches atomic_set(-1) in alloc_uobj */
 	atomic_set(&uobj->usecnt, 0);
 
+	/* alloc_commit consumes the uobj kref */
+	uobj->uapi_object->type_class->alloc_commit(uobj);
+
 	/* Matches the down_read in rdma_alloc_begin_uobject */
 	up_read(&ufile->hw_destroy_rwsem);
 }
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3de7606d4a1a..bdeb6500a919 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 	 */
 	synchronize_srcu(&dev->odp_srcu);
 
+	/*
+	 * All work on the prefetch list must be completed, xa_erase() prevented
+	 * new work from being created.
+	 */
+	wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+	/*
+	 * At this point it is forbidden for any other thread to enter
+	 * pagefault_mr() on this imr. It is already forbidden to call
+	 * pagefault_mr() on an implicit child. Due to this additions to
+	 * implicit_children are prevented.
+	 */
+
+	/*
+	 * Block destroy_unused_implicit_child_mr() from incrementing
+	 * num_deferred_work.
+	 */
 	xa_lock(&imr->implicit_children);
 	xa_for_each (&imr->implicit_children, idx, mtt) {
 		__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 	xa_unlock(&imr->implicit_children);
 
 	/*
-	 * num_deferred_work can only be incremented inside the odp_srcu, or
-	 * under xa_lock while the child is in the xarray. Thus at this point
-	 * it is only decreasing, and all work holding it is now on the wq.
+	 * Wait for any concurrent destroy_unused_implicit_child_mr() to
+	 * complete.
 	 */
 	wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
 
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 8fc3630a9d4c..0224231a2e6f 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 	struct mlx5_srq_table *table = &dev->srq_table;
 	struct mlx5_core_srq *srq;
 
-	xa_lock(&table->array);
+	xa_lock_irq(&table->array);
 	srq = xa_load(&table->array, srqn);
 	if (srq)
 		refcount_inc(&srq->common.refcount);
-	xa_unlock(&table->array);
+	xa_unlock_irq(&table->array);
 
 	return srq;
 }
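Taking the four fixes in order: the cm.c change wraps the rb_erase() in cm.lock so that removal from cm.remote_sidr_table is serialized with the insert and lookup paths, which already take that lock. Below is a minimal sketch of the same pattern; the names (my_table, my_lock, my_remove) are illustrative, not from the kernel source:

#include <linux/rbtree.h>
#include <linux/spinlock.h>

static struct rb_root my_table = RB_ROOT;	/* shared search tree */
static DEFINE_SPINLOCK(my_lock);		/* guards my_table */

static void my_remove(struct rb_node *node)
{
	spin_lock_irq(&my_lock);
	/*
	 * RB_EMPTY_NODE() is true only after RB_CLEAR_NODE(), i.e. once
	 * the node is no longer linked into any tree.
	 */
	if (!RB_EMPTY_NODE(node)) {
		rb_erase(node, &my_table);
		RB_CLEAR_NODE(node);	/* mark it removed for later checks */
	}
	spin_unlock_irq(&my_lock);
}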
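The rdma_core.c change only reorders existing code: alloc_commit consumes the uobj kref, so it must be the last step, after the uobject is on the ufile list and its usecnt has been reset. A sketch of the general rule, under hypothetical names (my_obj, my_commit, my_release): an operation that may drop the final reference has to run after the object is fully published.

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	struct kref ref;
	struct list_head node;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_list_lock);

static void my_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_obj, ref));
}

static void my_commit(struct my_obj *obj)
{
	/* Publish first: the list holds its own reference. */
	kref_get(&obj->ref);
	spin_lock_irq(&my_list_lock);
	list_add(&obj->node, &my_list);
	spin_unlock_irq(&my_list_lock);

	/*
	 * Only now consume the caller's reference. In the old order the
	 * consuming call came before list_add(), so the object could be
	 * torn down while it was still being linked in.
	 */
	kref_put(&obj->ref, my_release);
}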
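The odp.c change waits for num_deferred_work to drain twice: once before emptying implicit_children, so that already-queued prefetch work has finished, and once after, so any concurrent destroy_unused_implicit_child_mr() completes. The drain idiom itself, sketched with hypothetical names (pending, drain_wq; the real code waits on imr->q_deferred_work):

#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t pending = ATOMIC_INIT(0);	/* in-flight deferred work */
static DECLARE_WAIT_QUEUE_HEAD(drain_wq);

static void work_start(void)
{
	atomic_inc(&pending);
}

static void work_done(void)
{
	/* Wake the drainer when the last unit of work retires. */
	if (atomic_dec_and_test(&pending))
		wake_up(&drain_wq);
}

static void drain(void)
{
	/* Sleeps until every work_start() is matched by a work_done(). */
	wait_event(drain_wq, !atomic_read(&pending));
}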
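The srq_cmd.c change switches mlx5_cmd_get_srq() to the _irq lock variants because the SRQ table is also touched from interrupt context; without disabling interrupts, an IRQ arriving inside the critical section would spin on a lock its own CPU already holds. A sketch of the hazard, with hypothetical names (my_array, my_lookup, my_irq_handler):

#include <linux/xarray.h>

static DEFINE_XARRAY(my_array);

/* Process context: must disable interrupts while holding the lock. */
static void *my_lookup(unsigned long id)
{
	void *entry;

	xa_lock_irq(&my_array);
	entry = xa_load(&my_array, id);
	xa_unlock_irq(&my_array);
	return entry;
}

/*
 * Interrupt context: takes the same internal xa_lock. If my_lookup()
 * used plain xa_lock(), this handler could interrupt it on the same
 * CPU and deadlock.
 */
static void my_irq_handler(unsigned long id)
{
	xa_lock(&my_array);
	__xa_erase(&my_array, id);
	xa_unlock(&my_array);
}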