author     Bob Pearson <rpearsonhpe@gmail.com>   2022-05-04 23:28:17 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>      2022-05-05 03:16:57 +0300
commit     a926a903b7dc39a8a949150258c09290998dd812 (patch)
tree       115759663a8c9e6a894d9d86cc4f071e895bcb1e /drivers/infiniband
parent     ef91271c65c12d36e4c2b61c61d4849fb6d11aa0 (diff)
download   linux-a926a903b7dc39a8a949150258c09290998dd812.tar.xz
RDMA/rxe: Do not call dev_mc_add/del() under a spinlock
These routines were not intended to be called under a spinlock and will
throw debugging warnings:

  raw_local_irq_restore() called with IRQs enabled
  WARNING: CPU: 13 PID: 3107 at kernel/locking/irqflag-debug.c:10 warn_bogus_irq_restore+0x2f/0x50
  CPU: 13 PID: 3107 Comm: python3 Tainted: G E 5.18.0-rc1+ #7
  Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
  RIP: 0010:warn_bogus_irq_restore+0x2f/0x50
  Call Trace:
   <TASK>
   _raw_spin_unlock_irqrestore+0x75/0x80
   rxe_attach_mcast+0x304/0x480 [rdma_rxe]
   ib_attach_mcast+0x88/0xa0 [ib_core]
   ib_uverbs_attach_mcast+0x186/0x1e0 [ib_uverbs]
   ib_uverbs_handler_UVERBS_METHOD_INVOKE_WRITE+0xcd/0x140 [ib_uverbs]
   ib_uverbs_cmd_verbs+0xdb0/0xea0 [ib_uverbs]
   ib_uverbs_ioctl+0xd2/0x160 [ib_uverbs]
   do_syscall_64+0x5c/0x80
   entry_SYSCALL_64_after_hwframe+0x44/0xae

Move them out of the spinlock; it is OK if there are some races setting up
the MC reception at the ethernet layer with the rbtree lookups.

Fixes: 6090a0c4c7c6 ("RDMA/rxe: Cleanup rxe_mcast.c")
Link: https://lore.kernel.org/r/20220504202817.98247-1-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mcast.c  51
1 file changed, 23 insertions, 28 deletions
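For context before the hunks: dev_mc_add()/dev_mc_del() internally take the
device's address-list lock with BH-style locking, so they may not run while a
spinlock is held with IRQs disabled; the patch therefore moves them outside
rxe->mcg_lock. A minimal standalone sketch of the resulting pattern follows.
Apart from dev_mc_add() and the spinlock API, every name in it is a
hypothetical stand-in, not the driver's actual code:

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: example_attach() and its arguments are hypothetical. */
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

static int example_attach(struct net_device *ndev, spinlock_t *lock,
			  const unsigned char ll_addr[ETH_ALEN])
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* ... only rbtree insertion and refcount setup under the lock ... */
	spin_unlock_irqrestore(lock, flags);

	/*
	 * dev_mc_add() takes the device address-list lock with BH-style
	 * locking internally, so call it only after the irqsave lock has
	 * been released and IRQs are enabled again.
	 */
	return dev_mc_add(ndev, ll_addr);
}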
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index ae8f11cb704a..77e45cabd8ea 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -38,13 +38,13 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
}
/**
- * rxe_mcast_delete - delete multicast address from rxe device
+ * rxe_mcast_del - delete multicast address from rxe device
* @rxe: rxe device object
* @mgid: multicast address as a gid
*
* Returns 0 on success else an error
*/
-static int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
@@ -159,17 +159,10 @@ struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
* @mcg: new mcg object
*
* Context: caller should hold rxe->mcg lock
- * Returns: 0 on success else an error
*/
-static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
- struct rxe_mcg *mcg)
+static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mcg *mcg)
{
- int err;
-
- err = rxe_mcast_add(rxe, mgid);
- if (unlikely(err))
- return err;
-
kref_init(&mcg->ref_cnt);
memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
INIT_LIST_HEAD(&mcg->qp_list);
@@ -184,8 +177,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
*/
kref_get(&mcg->ref_cnt);
__rxe_insert_mcg(mcg);
-
- return 0;
}
/**
@@ -209,6 +200,12 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
if (mcg)
return mcg;
+ /* check to see if we have reached limit */
+ if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
+ err = -ENOMEM;
+ goto err_dec;
+ }
+
/* speculative alloc of new mcg */
mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
if (!mcg)
@@ -218,27 +215,23 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
/* re-check to see if someone else just added it */
tmp = __rxe_lookup_mcg(rxe, mgid);
if (tmp) {
+ spin_unlock_irqrestore(&rxe->mcg_lock, flags);
+ atomic_dec(&rxe->mcg_num);
kfree(mcg);
- mcg = tmp;
- goto out;
- }
-
- if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
- err = -ENOMEM;
- goto err_dec;
+ return tmp;
}
- err = __rxe_init_mcg(rxe, mgid, mcg);
- if (err)
- goto err_dec;
-out:
+ __rxe_init_mcg(rxe, mgid, mcg);
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
- return mcg;
+ /* add mcast address outside of lock */
+ err = rxe_mcast_add(rxe, mgid);
+ if (!err)
+ return mcg;
+
+ kfree(mcg);
err_dec:
atomic_dec(&rxe->mcg_num);
- spin_unlock_irqrestore(&rxe->mcg_lock, flags);
- kfree(mcg);
return ERR_PTR(err);
}
@@ -268,7 +261,6 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
__rxe_remove_mcg(mcg);
kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
- rxe_mcast_delete(mcg->rxe, &mcg->mgid);
atomic_dec(&rxe->mcg_num);
}
@@ -282,6 +274,9 @@ static void rxe_destroy_mcg(struct rxe_mcg *mcg)
{
unsigned long flags;
+ /* delete mcast address outside of lock */
+ rxe_mcast_del(mcg->rxe, &mcg->mgid);
+
spin_lock_irqsave(&mcg->rxe->mcg_lock, flags);
__rxe_destroy_mcg(mcg);
spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
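One design note on the rxe_get_mcg() hunk above: the patch keeps rxe's
speculative-allocation scheme, where the mcg is allocated with GFP_KERNEL
(which may sleep) before taking the lock, and the tree is re-checked under
rxe->mcg_lock in case a concurrent caller won the race. A generic sketch of
that double-checked insert, using a plain list instead of rxe's rbtree and
hypothetical names throughout:

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: struct obj, get_or_create(), and __lookup() are hypothetical. */
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct obj {
	struct list_head node;
	int key;
};

static LIST_HEAD(obj_list);
static DEFINE_SPINLOCK(obj_lock);

/* Caller must hold obj_lock. */
static struct obj *__lookup(int key)
{
	struct obj *o;

	list_for_each_entry(o, &obj_list, node)
		if (o->key == key)
			return o;
	return NULL;
}

static struct obj *get_or_create(int key)
{
	struct obj *obj, *tmp;
	unsigned long flags;

	/* GFP_KERNEL may sleep, so allocate before taking the lock. */
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&obj_lock, flags);
	tmp = __lookup(key);
	if (tmp) {
		/* Lost the race: someone else added it while we allocated. */
		spin_unlock_irqrestore(&obj_lock, flags);
		kfree(obj);
		return tmp;
	}
	obj->key = key;
	list_add(&obj->node, &obj_list);
	spin_unlock_irqrestore(&obj_lock, flags);

	return obj;
}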