author		Joel Stanley <joel@jms.id.au>	2020-08-04 10:58:32 +0300
committer	Joel Stanley <joel@jms.id.au>	2020-08-04 10:58:36 +0300
commit		9524588381df213554a10d3661a9200a4b7f6db2 (patch)
tree		a4d281b99dcd28b9a1d3077299c26a5671abff88 /drivers/infiniband/hw/mlx5/odp.c
parent		666e403461fd046b37bc95bba49e82ca6a758bea (diff)
parent		67da9e2c2b730b9b788ace749d22d769cf11ee2b (diff)
Merge tag 'v5.7.12' into dev-5.7
This is the 5.7.12 stable release
Signed-off-by: Joel Stanley <joel@jms.id.au>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/odp.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/odp.c	22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 3de7606d4a1a..bdeb6500a919 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 	 */
 	synchronize_srcu(&dev->odp_srcu);
 
+	/*
+	 * All work on the prefetch list must be completed, xa_erase() prevented
+	 * new work from being created.
+	 */
+	wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+	/*
+	 * At this point it is forbidden for any other thread to enter
+	 * pagefault_mr() on this imr. It is already forbidden to call
+	 * pagefault_mr() on an implicit child. Due to this additions to
+	 * implicit_children are prevented.
+	 */
+
+	/*
+	 * Block destroy_unused_implicit_child_mr() from incrementing
+	 * num_deferred_work.
+	 */
 	xa_lock(&imr->implicit_children);
 	xa_for_each (&imr->implicit_children, idx, mtt) {
 		__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 	xa_unlock(&imr->implicit_children);
 
 	/*
-	 * num_deferred_work can only be incremented inside the odp_srcu, or
-	 * under xa_lock while the child is in the xarray. Thus at this point
-	 * it is only decreasing, and all work holding it is now on the wq.
+	 * Wait for any concurrent destroy_unused_implicit_child_mr() to
+	 * complete.
 	 */
 	wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
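The destroy path above relies on a flush pattern: deferred work may take a reference (increment num_deferred_work) only while the object is still reachable and the protecting lock or SRCU section is held; the destroyer first unpublishes the object under that same protection, then sleeps until the counter drains to zero. Below is a minimal userspace C sketch of the same idea, with hypothetical names (struct obj, obj_begin_deferred_work(), etc.) that are not part of the kernel; the real code uses xa_lock(), atomic_t, and wait_event() where this analog uses a pthread mutex and condition variable.

/*
 * Simplified analog of the synchronization in mlx5_ib_free_implicit_mr().
 * Not the kernel API; all names here are illustrative.
 */
#include <pthread.h>

struct obj {
	pthread_mutex_t lock;	/* plays the role of xa_lock()             */
	pthread_cond_t drained;	/* plays the role of q_deferred_work       */
	int num_deferred_work;	/* plays the role of the atomic counter    */
	int published;		/* object still reachable by other threads */
};

/* Worker side: take a reference only while the object is published. */
static int obj_begin_deferred_work(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (!o->published) {
		pthread_mutex_unlock(&o->lock);
		return 0;	/* destroy already started, refuse new work */
	}
	o->num_deferred_work++;
	pthread_mutex_unlock(&o->lock);
	return 1;
}

static void obj_end_deferred_work(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (--o->num_deferred_work == 0)
		pthread_cond_signal(&o->drained);	/* wake the destroyer */
	pthread_mutex_unlock(&o->lock);
}

/* Destroy side: unpublish first, then wait for in-flight work. */
static void obj_destroy(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->published = 0;	/* like __xa_erase(): no new increments possible */
	while (o->num_deferred_work)	/* like wait_event() on the counter */
		pthread_cond_wait(&o->drained, &o->lock);
	pthread_mutex_unlock(&o->lock);
	/* now safe to free o's resources */
}

static struct obj the_obj = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.drained = PTHREAD_COND_INITIALIZER,
	.published = 1,
};

int main(void)
{
	if (obj_begin_deferred_work(&the_obj)) {
		/* ... do or queue the deferred work ... */
		obj_end_deferred_work(&the_obj);
	}
	obj_destroy(&the_obj);	/* returns only once no work is in flight */
	return 0;
}

Because unpublishing and incrementing happen under the same lock, no thread can bump the counter after the destroyer has unpublished the object, which is exactly why the patch can safely wait_event() a second time after emptying implicit_children under xa_lock().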