summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJason Gunthorpe <jgg@nvidia.com>2026-04-28 19:17:48 +0300
committerJason Gunthorpe <jgg@nvidia.com>2026-05-02 21:30:48 +0300
commit0c99acbc8b6c6dd526ae475a48ee1897b61072fb (patch)
treea7ab1849fd28639613f7576230681bee24255581
parent7d51783d82fea000a9ce96fa1dcf3e0a8cedc4fb (diff)
downloadlinux-0c99acbc8b6c6dd526ae475a48ee1897b61072fb.tar.xz
RDMA/hns: Fix unlocked call to hns_roce_qp_remove()
Sashiko points out that hns_roce_qp_remove() requires the caller to hold locks. The error flow in hns_roce_create_qp_common() doesn't hold those locks for the error unwind, so it risks corrupting memory. Grab the same locks the other two callers use. Cc: stable@vger.kernel.org Fixes: e088a685eae9 ("RDMA/hns: Support rq record doorbell for the user space") Link: https://sashiko.dev/#/patchset/0-v2-1c49eeb88c48%2B91-rdma_udata_rep_jgg%40nvidia.com?part=9 Link: https://patch.msgid.link/r/15-v1-41f3135e5565+9d2-rdma_ai_fixes1_jgg@nvidia.com Reviewed-by: Junxian Huang <huangjunxian6@hisilicon.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index f94ba98871f0..bf04ee84a943 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1171,6 +1171,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp_resp resp = {};
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_ib_create_qp ucmd = {};
+ unsigned long flags;
int ret;
mutex_init(&hr_qp->mutex);
@@ -1257,7 +1258,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
return 0;
err_flow_ctrl:
+ spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+ hns_roce_lock_cqs(init_attr->send_cq ? to_hr_cq(init_attr->send_cq) : NULL,
+ init_attr->recv_cq ? to_hr_cq(init_attr->recv_cq) : NULL);
hns_roce_qp_remove(hr_dev, hr_qp);
+ hns_roce_unlock_cqs(init_attr->send_cq ? to_hr_cq(init_attr->send_cq) : NULL,
+ init_attr->recv_cq ? to_hr_cq(init_attr->recv_cq) : NULL);
+ spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
err_store:
free_qpc(hr_dev, hr_qp);
err_qpc: