author     Linus Torvalds <torvalds@linux-foundation.org>   2018-04-07 03:35:43 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-04-07 03:35:43 +0300
commit     19fd08b85bc7e0502b55cd726f466df82ee7e777 (patch)
tree       b042de4b9a8a9478c528ea950b14d34487375695 /drivers/infiniband/hw/hns
parent     28da7be5ebc096ada5e6bc526c623bdd8c47800a (diff)
parent     efc365e7290d040fbd43f60b0e97653489a739d4 (diff)
download   linux-19fd08b85bc7e0502b55cd726f466df82ee7e777.tar.xz
Merge tag 'for-linus-unmerged' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "Doug and I are at a conference next week, so if another PR is sent I
  expect it to only be bug fixes. Parav noted yesterday that there are
  some fringe-case behavior changes in his work that he would like to
  fix, and I see that Intel has a number of rc-looking patches for HFI1
  they posted yesterday.

  Parav is again the biggest contributor by patch count with his
  ongoing work to enable container support in the RDMA stack, followed
  by Leon doing syzkaller-inspired cleanups, though most of the actual
  fixing went to RC.

  There is one uncomfortable series here fixing the user ABI to
  actually work as intended in 32-bit mode. There are lots of notes in
  the commit messages, but the basic summary is that we don't think
  there is an actual 32-bit kernel user of drivers/infiniband, for
  several good reasons. However, we are seeing people who want to use a
  32-bit user space with a 64-bit kernel, which didn't completely work
  today. Fixing it required a 32-bit rxe user to upgrade their
  userspace; rxe users are already quite rare, and we think a 32-bit
  one is non-existent.

   - Fix RDMA uapi headers to actually compile in userspace and be
     more complete

   - Three pull requests shared with netdev, from Mellanox:

      * 7 patches, mostly to net, with 1 IB-related one at the back.
        This series addresses an IRQ performance issue (patch 1),
        cleanups related to the fix for the IRQ performance problem
        (patches 2-6), and then extends the fragmented completion queue
        support that already exists in the net side of the driver to
        the ib side of the driver (patch 7).

      * Mostly IB, with 5 patches to net that are needed to support the
        remaining 10 patches to the IB subsystem. This series extends
        the current 'representor' framework, when the mlx5 driver is in
        switchdev mode, from being a netdev-only construct to being a
        netdev/IB dev construct. The IB dev is limited to raw Eth queue
        pairs only, but by having an IB dev of this type attached to
        the representor for a switchdev port, it enables DPDK to work
        on the switchdev device.

      * All net related, but needed as infrastructure for the rdma
        driver

   - Updates for the hns, i40iw, bnxt_re, cxgb3 and cxgb4 drivers

   - SRP performance updates

   - IB uverbs write path cleanup patch series from Leon

   - Add RDMA_CM support to ib_srpt. This is disabled by default; users
     need to set the port for ib_srpt to listen on in configfs in order
     for it to be enabled
     (/sys/kernel/config/target/srpt/discovery_auth/rdma_cm_port)

   - TSO and Scatter FCS support in mlx4

   - Refactor of the modify_qp routine to resolve problems seen while
     working on forthcoming new code

   - More refactoring and updates of RDMA CM for container support from
     Parav

   - mlx5 'fine grained packet pacing', 'ipsec offload' and 'device
     memory' user API features

   - Infrastructure updates for the new IOCTL interface, based on
     increased usage

   - ABI compatibility bug fixes to fully support 32-bit userspace on a
     64-bit kernel, as was originally intended. See the commit messages
     for extensive details

   - Syzkaller bugs and code cleanups motivated by them"

* tag 'for-linus-unmerged' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (199 commits)
  IB/rxe: Fix for oops in rxe_register_device on ppc64le arch
  IB/mlx5: Device memory mr registration support
  net/mlx5: Mkey creation command adjustments
  IB/mlx5: Device memory support in mlx5_ib
  net/mlx5: Query device memory capabilities
  IB/uverbs: Add device memory registration ioctl support
  IB/uverbs: Add alloc/free dm uverbs ioctl support
  IB/uverbs: Add device memory capabilities reporting
  IB/uverbs: Expose device memory capabilities to user
  RDMA/qedr: Fix wmb usage in qedr
  IB/rxe: Removed GID add/del dummy routines
  RDMA/qedr: Zero stack memory before copying to user space
  IB/mlx5: Add ability to hash by IPSEC_SPI when creating a TIR
  IB/mlx5: Add information for querying IPsec capabilities
  IB/mlx5: Add IPsec support for egress and ingress
  {net,IB}/mlx5: Add ipsec helper
  IB/mlx5: Add modify_flow_action_esp verb
  IB/mlx5: Add implementation for create and destroy action_xfrm
  IB/uverbs: Introduce ESP steering match filter
  IB/uverbs: Add modify ESP flow_action
  ...
Diffstat (limited to 'drivers/infiniband/hw/hns')
-rw-r--r--  drivers/infiniband/hw/hns/Makefile          |   2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c     |   8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c     |  54
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_db.c     | 180
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h |  59
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c  |   8
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |  80
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h  |   3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c   |  41
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c     |   2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_pd.c     |   5
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c     |  70
12 files changed, 434 insertions, 78 deletions
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 97bf2cd1cacb..cf03404b9d58 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -7,7 +7,7 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
- hns_roce_cq.o hns_roce_alloc.o
+ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
hns-roce-hw-v1-objs := hns_roce_hw_v1.o
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 7dd6a66ea244..d74928621559 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -68,11 +68,9 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
return ERR_PTR(ret);
}
- if (gid_attr.ndev) {
- if (is_vlan_dev(gid_attr.ndev))
- vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
- dev_put(gid_attr.ndev);
- }
+ if (is_vlan_dev(gid_attr.ndev))
+ vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
+ dev_put(gid_attr.ndev);
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index bccc9b54c9ce..14734d0d0b76 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -315,6 +315,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd;
+ struct hns_roce_ib_create_cq_resp resp = {};
struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector;
@@ -354,15 +355,36 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
goto err_cq;
}
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ (udata->outlen >= sizeof(resp))) {
+ ret = hns_roce_db_map_user(to_hr_ucontext(context),
+ ucmd.db_addr, &hr_cq->db);
+ if (ret) {
+ dev_err(dev, "cq record doorbell map failed!\n");
+ goto err_mtt;
+ }
+ hr_cq->db_en = 1;
+ resp.cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
+ }
+
/* Get user space parameters */
uar = &to_hr_ucontext(context)->uar;
} else {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+ if (ret)
+ goto err_cq;
+
+ hr_cq->set_ci_db = hr_cq->db.db_record;
+ *hr_cq->set_ci_db = 0;
+ }
+
/* Init mmt table and write buff address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
cq_entries);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
- goto err_cq;
+ goto err_db;
}
uar = &hr_dev->priv_uar;
@@ -375,7 +397,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
hr_cq, vector);
if (ret) {
dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
- goto err_mtt;
+ goto err_dbmap;
}
/*
@@ -393,10 +415,10 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
hr_cq->cq_depth = cq_entries;
if (context) {
- if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
- ret = -EFAULT;
+ resp.cqn = hr_cq->cqn;
+ ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (ret)
goto err_cqc;
- }
}
return &hr_cq->ib_cq;
@@ -404,6 +426,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
err_cqc:
hns_roce_free_cq(hr_dev, hr_cq);
+err_dbmap:
+ if (context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ (udata->outlen >= sizeof(resp)))
+ hns_roce_db_unmap_user(to_hr_ucontext(context),
+ &hr_cq->db);
+
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
if (context)
@@ -412,6 +440,10 @@ err_mtt:
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
hr_cq->ib_cq.cqe);
+err_db:
+ if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
+ hns_roce_free_db(hr_dev, &hr_cq->db);
+
err_cq:
kfree(hr_cq);
return ERR_PTR(ret);
@@ -430,12 +462,20 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
hns_roce_free_cq(hr_dev, hr_cq);
hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
- if (ib_cq->uobject)
+ if (ib_cq->uobject) {
ib_umem_release(hr_cq->umem);
- else
+
+ if (hr_cq->db_en == 1)
+ hns_roce_db_unmap_user(
+ to_hr_ucontext(ib_cq->uobject->context),
+ &hr_cq->db);
+ } else {
/* Free the buff of stored cq */
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
ib_cq->cqe);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+ hns_roce_free_db(hr_dev, &hr_cq->db);
+ }
kfree(hr_cq);
}
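[Editor's note] The user-CQ path above gates the record doorbell on two conditions: the device advertises HNS_ROCE_CAP_FLAG_RECORD_DB, and userspace passed a response buffer large enough for the enlarged hns_roce_ib_create_cq_resp, so an older userlib keeps the legacy contract. A minimal sketch of this outlen-based negotiation idiom follows; the structure and function names (drv_create_resp, create_obj) are hypothetical stand-ins, and the defensive min_t() copy is a variant, not a quote of this driver:

    #include <rdma/ib_verbs.h>

    /* Hypothetical response layout; only the tail field is new. */
    struct drv_create_resp {
            __u32 obj_id;
            __u32 cap_flags;        /* new: older userspace never sees this */
    };

    static int create_obj(struct ib_udata *udata, struct drv_create_resp *resp,
                          bool dev_supports_feature)
    {
            /*
             * Enable the feature only when userspace supplied room for the
             * enlarged response, i.e. it is new enough to understand it.
             */
            if (dev_supports_feature && udata->outlen >= sizeof(*resp))
                    resp->cap_flags |= 1;   /* e.g. "record doorbell in use" */

            /* Never copy more than the caller asked for. */
            return ib_copy_to_udata(udata, resp,
                                    min_t(size_t, udata->outlen, sizeof(*resp)));
    }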
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
new file mode 100644
index 000000000000..ebee2782a573
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2017 Hisilicon Limited.
+ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
+
+int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
+ struct hns_roce_db *db)
+{
+ struct hns_roce_user_db_page *page;
+ int ret = 0;
+
+ mutex_lock(&context->page_mutex);
+
+ list_for_each_entry(page, &context->page_list, list)
+ if (page->user_virt == (virt & PAGE_MASK))
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+ if (!page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ refcount_set(&page->refcount, 1);
+ page->user_virt = (virt & PAGE_MASK);
+ page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
+ PAGE_SIZE, 0, 0);
+ if (IS_ERR(page->umem)) {
+ ret = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+
+ list_add(&page->list, &context->page_list);
+
+found:
+ db->dma = sg_dma_address(page->umem->sg_head.sgl) +
+ (virt & ~PAGE_MASK);
+ db->u.user_page = page;
+ refcount_inc(&page->refcount);
+
+out:
+ mutex_unlock(&context->page_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(hns_roce_db_map_user);
+
+void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
+ struct hns_roce_db *db)
+{
+ mutex_lock(&context->page_mutex);
+
+ refcount_dec(&db->u.user_page->refcount);
+ if (refcount_dec_if_one(&db->u.user_page->refcount)) {
+ list_del(&db->u.user_page->list);
+ ib_umem_release(db->u.user_page->umem);
+ kfree(db->u.user_page);
+ }
+
+ mutex_unlock(&context->page_mutex);
+}
+EXPORT_SYMBOL(hns_roce_db_unmap_user);
+
+static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
+ struct device *dma_device)
+{
+ struct hns_roce_db_pgdir *pgdir;
+
+ pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+ if (!pgdir)
+ return NULL;
+
+ bitmap_fill(pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2);
+ pgdir->bits[0] = pgdir->order0;
+ pgdir->bits[1] = pgdir->order1;
+ pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
+ &pgdir->db_dma, GFP_KERNEL);
+ if (!pgdir->page) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ return pgdir;
+}
+
+static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
+ struct hns_roce_db *db, int order)
+{
+ int o;
+ int i;
+
+ for (o = order; o <= 1; ++o) {
+ i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
+ if (i < HNS_ROCE_DB_PER_PAGE >> o)
+ goto found;
+ }
+
+ return -ENOMEM;
+
+found:
+ clear_bit(i, pgdir->bits[o]);
+
+ i <<= o;
+
+ if (o > order)
+ set_bit(i ^ 1, pgdir->bits[order]);
+
+ db->u.pgdir = pgdir;
+ db->index = i;
+ db->db_record = pgdir->page + db->index;
+ db->dma = pgdir->db_dma + db->index * 4;
+ db->order = order;
+
+ return 0;
+}
+
+int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+ int order)
+{
+ struct hns_roce_db_pgdir *pgdir;
+ int ret = 0;
+
+ mutex_lock(&hr_dev->pgdir_mutex);
+
+ list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)
+ if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
+ goto out;
+
+ pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);
+ if (!pgdir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&pgdir->list, &hr_dev->pgdir_list);
+
+ /* This should never fail -- we just allocated an empty page: */
+ WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+ mutex_unlock(&hr_dev->pgdir_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hns_roce_alloc_db);
+
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
+{
+ int o;
+ int i;
+
+ mutex_lock(&hr_dev->pgdir_mutex);
+
+ o = db->order;
+ i = db->index;
+
+ if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+ clear_bit(i ^ 1, db->u.pgdir->order0);
+ ++o;
+ }
+
+ i >>= o;
+ set_bit(i, db->u.pgdir->bits[o]);
+
+ if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) {
+ dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
+ db->u.pgdir->db_dma);
+ list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir);
+ }
+
+ mutex_unlock(&hr_dev->pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(hns_roce_free_db);
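[Editor's note] hns_roce_alloc_db_from_pgdir() above is a tiny two-order buddy allocator over one DMA-coherent page: order-0 requests take a single 4-byte doorbell slot, order-1 requests take a naturally aligned pair, splitting an order-1 pair frees its buddy slot (index i ^ 1), and freeing a slot whose buddy is free coalesces the pair. Below is a standalone userspace sketch of the same bookkeeping, assuming 4 KiB pages (so 1024 records per page) and byte arrays in place of the kernel bitmaps, for illustration only:

    #include <stdio.h>
    #include <string.h>

    #define DB_PER_PAGE 1024        /* PAGE_SIZE / 4, assuming 4 KiB pages */

    static unsigned char order0[DB_PER_PAGE];       /* 1 = free single slot */
    static unsigned char order1[DB_PER_PAGE / 2];   /* 1 = free aligned pair */

    static void pgdir_init(void)
    {
            /* A fresh page starts as all free pairs, no free singles. */
            memset(order0, 0, sizeof(order0));
            memset(order1, 1, sizeof(order1));
    }

    static int alloc_db(int order)
    {
            int o, i;

            /* Search the requested order first, then the larger one. */
            for (o = order; o <= 1; ++o) {
                    for (i = 0; i < DB_PER_PAGE >> o; ++i) {
                            if ((o ? order1 : order0)[i]) {
                                    (o ? order1 : order0)[i] = 0;
                                    i <<= o;
                                    /* Split: hand the buddy back as a free single. */
                                    if (o > order)
                                            order0[i ^ 1] = 1;
                                    return i;   /* slot index within the page */
                            }
                    }
            }
            return -1;      /* page full for this order */
    }

    static void free_db(int i, int order)
    {
            int o = order;

            /* Coalesce an order-0 slot with its buddy if the buddy is free. */
            if (o == 0 && order0[i ^ 1]) {
                    order0[i ^ 1] = 0;
                    ++o;
            }
            (o ? order1 : order0)[i >> o] = 1;
    }

    int main(void)
    {
            pgdir_init();
            int a = alloc_db(0), b = alloc_db(0);
            printf("slots %d and %d share one pair\n", a, b);  /* 0 and 1 */
            free_db(a, 0);
            free_db(b, 0);  /* buddy now free: pair coalesces back to order1 */
            return 0;
    }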
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 165a09b314f6..fb305b7f99a8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -105,6 +105,14 @@
#define PAGES_SHIFT_24 24
#define PAGES_SHIFT_32 32
+enum {
+ HNS_ROCE_SUPPORT_RQ_RECORD_DB = 1 << 0,
+};
+
+enum {
+ HNS_ROCE_SUPPORT_CQ_RECORD_DB = 1 << 0,
+};
+
enum hns_roce_qp_state {
HNS_ROCE_QP_STATE_RST,
HNS_ROCE_QP_STATE_INIT,
@@ -178,7 +186,8 @@ enum {
enum {
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
- HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2)
+ HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
+ HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3)
};
enum hns_roce_mtt_type {
@@ -186,6 +195,10 @@ enum hns_roce_mtt_type {
MTT_TYPE_CQE,
};
+enum {
+ HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
+};
+
#define HNS_ROCE_CMD_SUCCESS 1
#define HNS_ROCE_PORT_DOWN 0
@@ -203,6 +216,8 @@ struct hns_roce_uar {
struct hns_roce_ucontext {
struct ib_ucontext ibucontext;
struct hns_roce_uar uar;
+ struct list_head page_list;
+ struct mutex page_mutex;
};
struct hns_roce_pd {
@@ -335,6 +350,33 @@ struct hns_roce_buf {
int page_shift;
};
+struct hns_roce_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
+ DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / 2);
+ unsigned long *bits[2];
+ u32 *page;
+ dma_addr_t db_dma;
+};
+
+struct hns_roce_user_db_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ unsigned long user_virt;
+ refcount_t refcount;
+};
+
+struct hns_roce_db {
+ u32 *db_record;
+ union {
+ struct hns_roce_db_pgdir *pgdir;
+ struct hns_roce_user_db_page *user_page;
+ } u;
+ dma_addr_t dma;
+ int index;
+ int order;
+};
+
struct hns_roce_cq_buf {
struct hns_roce_buf hr_buf;
struct hns_roce_mtt hr_mtt;
@@ -343,6 +385,8 @@ struct hns_roce_cq_buf {
struct hns_roce_cq {
struct ib_cq ib_cq;
struct hns_roce_cq_buf hr_buf;
+ struct hns_roce_db db;
+ u8 db_en;
spinlock_t lock;
struct ib_umem *umem;
void (*comp)(struct hns_roce_cq *cq);
@@ -351,6 +395,7 @@ struct hns_roce_cq {
struct hns_roce_uar *uar;
u32 cq_depth;
u32 cons_index;
+ u32 *set_ci_db;
void __iomem *cq_db_l;
u16 *tptr_addr;
int arm_sn;
@@ -466,6 +511,8 @@ struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
struct hns_roce_wq rq;
+ struct hns_roce_db rdb;
+ u8 rdb_en;
u32 doorbell_qpn;
__le32 sq_signal_bits;
u32 sq_next_wqe;
@@ -725,6 +772,8 @@ struct hns_roce_dev {
spinlock_t bt_cmd_lock;
struct hns_roce_ib_iboe iboe;
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
int irq[HNS_ROCE_MAX_IRQ_NUM];
u8 __iomem *reg_base;
struct hns_roce_caps caps;
@@ -930,6 +979,14 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq);
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
+int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
+ struct hns_roce_db *db);
+void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
+ struct hns_roce_db *db);
+int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+ int order);
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
+
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
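[Editor's note] struct hns_roce_db above serves both kernel and user doorbells: the union arm in use is fixed at creation time (hns_roce_alloc_db() fills u.pgdir, hns_roce_db_map_user() fills u.user_page) rather than tagged at runtime, and consumers only touch db_record/dma afterwards. A hedged sketch of that discriminated-by-construction pattern, using the declarations above; setup_cq_db() itself is a hypothetical helper, and order 1 matches the CQ path in hns_roce_cq.c:

    /* Sketch: pick the union arm by how the doorbell is created. */
    static int setup_cq_db(struct hns_roce_dev *hr_dev,
                           struct hns_roce_cq *hr_cq,
                           struct hns_roce_ucontext *uctx,  /* NULL for kernel CQs */
                           unsigned long user_db_addr)
    {
            if (uctx)       /* user CQ: db.u.user_page, backed by pinned user memory */
                    return hns_roce_db_map_user(uctx, user_db_addr, &hr_cq->db);

            /* kernel CQ: db.u.pgdir, backed by a shared DMA-coherent page */
            return hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
    }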
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index da13bd7c3ca9..47e1b6ac1e1a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1687,13 +1687,13 @@ static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
- __raw_writeq(cpu_to_le64(in_param), hcr + 0);
- __raw_writeq(cpu_to_le64(out_param), hcr + 2);
- __raw_writel(cpu_to_le32(in_modifier), hcr + 4);
+ writeq(in_param, hcr + 0);
+ writeq(out_param, hcr + 2);
+ writel(in_modifier, hcr + 4);
/* Memory barrier */
wmb();
- __raw_writel(cpu_to_le32(val), hcr + 5);
+ writel(val, hcr + 5);
mmiowb();
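[Editor's note] The mailbox hunk above swaps open-coded __raw_writeq(cpu_to_le64(...)) sequences for writeq()/writel(). The difference is more than spelling: the __raw_* accessors neither byte-swap nor order, so the old code converted endianness by hand, while writel()/writeq() are defined to store little-endian on any host and carry MMIO ordering semantics. A sketch of the equivalence, hedged as the usual portable-driver idiom rather than anything hns-specific:

    #include <linux/io.h>

    /* Illustration of the accessor swap in the mailbox hunks above. */
    static void post_word(u32 val, void __iomem *reg)
    {
            /* Old style: byte-swap by hand, raw store, no ordering implied. */
            __raw_writel((__force u32)cpu_to_le32(val), reg);

            /* New style: writel() stores little-endian regardless of host
             * endianness and includes the ordering MMIO writes require.
             */
            writel(val, reg);
    }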
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ec638778661c..8b84ab7800d8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -498,7 +498,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
- struct hns_roce_v2_db rq_db;
unsigned long flags;
void *wqe = NULL;
int ret = 0;
@@ -509,7 +508,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&hr_qp->rq.lock, flags);
ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
- if (hr_qp->state == IB_QPS_RESET || hr_qp->state == IB_QPS_ERR) {
+ if (hr_qp->state == IB_QPS_RESET) {
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
*bad_wr = wr;
return -EINVAL;
@@ -564,17 +563,7 @@ out:
/* Memory barrier */
wmb();
- rq_db.byte_4 = 0;
- rq_db.parameter = 0;
-
- roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_TAG_M,
- V2_DB_BYTE_4_TAG_S, hr_qp->qpn);
- roce_set_field(rq_db.byte_4, V2_DB_BYTE_4_CMD_M,
- V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_RQ_DB);
- roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
- V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
-
- hns_roce_write64_k((__le32 *)&rq_db, hr_qp->rq.db_reg_l);
+ *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
@@ -1168,7 +1157,8 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
- HNS_ROCE_CAP_FLAG_RQ_INLINE;
+ HNS_ROCE_CAP_FLAG_RQ_INLINE |
+ HNS_ROCE_CAP_FLAG_RECORD_DB;
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
@@ -1228,14 +1218,14 @@ static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);
- __raw_writeq(cpu_to_le64(in_param), hcr + 0);
- __raw_writeq(cpu_to_le64(out_param), hcr + 2);
+ writeq(in_param, hcr + 0);
+ writeq(out_param, hcr + 2);
/* Memory barrier */
wmb();
- __raw_writel(cpu_to_le32(val0), hcr + 4);
- __raw_writel(cpu_to_le32(val1), hcr + 5);
+ writel(val0, hcr + 4);
+ writel(val1, hcr + 5);
mmiowb();
@@ -1507,24 +1497,7 @@ static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
- struct hns_roce_v2_cq_db cq_db;
-
- cq_db.byte_4 = 0;
- cq_db.parameter = 0;
-
- roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
- V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
- roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
- V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);
-
- roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
- V2_CQ_DB_PARAMETER_CONS_IDX_S,
- cons_index & ((hr_cq->cq_depth << 1) - 1));
- roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
- V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
-
- hns_roce_write64_k((__be32 *)&cq_db, hr_cq->cq_db_l);
-
+ *hr_cq->set_ci_db = cons_index & 0xffffff;
}
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -1637,6 +1610,16 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
+ if (hr_cq->db_en)
+ roce_set_bit(cq_context->byte_44_db_record,
+ V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
+
+ roce_set_field(cq_context->byte_44_db_record,
+ V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
+ V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
+ ((u32)hr_cq->db.dma) >> 1);
+ cq_context->db_record_addr = hr_cq->db.dma >> 32;
+
roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
V2_CQC_BYTE_56_CQ_MAX_CNT_M,
V2_CQC_BYTE_56_CQ_MAX_CNT_S,
@@ -2274,6 +2257,23 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
hr_qp->qkey = attr->qkey;
}
+ if (hr_qp->rdb_en) {
+ roce_set_bit(context->byte_68_rq_db,
+ V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_68_rq_db,
+ V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
+ }
+
+ roce_set_field(context->byte_68_rq_db,
+ V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
+ V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
+ ((u32)hr_qp->rdb.dma) >> 1);
+ roce_set_field(qpc_mask->byte_68_rq_db,
+ V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
+ V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
+ context->rq_db_record_addr = hr_qp->rdb.dma >> 32;
+ qpc_mask->rq_db_record_addr = 0;
+
roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 1);
roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
@@ -3211,6 +3211,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
hr_qp->sq.tail = 0;
hr_qp->sq_next_wqe = 0;
hr_qp->next_sge = 0;
+ if (hr_qp->rq.wqe_cnt)
+ *hr_qp->rdb.db_record = 0;
}
out:
@@ -3437,11 +3439,17 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
if (is_user) {
+ if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
+ hns_roce_db_unmap_user(
+ to_hr_ucontext(hr_qp->ibqp.uobject->context),
+ &hr_qp->rdb);
ib_umem_release(hr_qp->umem);
} else {
kfree(hr_qp->sq.wrid);
kfree(hr_qp->rq.wrid);
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+ if (hr_qp->rq.wqe_cnt)
+ hns_roce_free_db(hr_dev, &hr_qp->rdb);
}
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
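[Editor's note] With record doorbells enabled, hns_roce_v2_cq_set_ci() above shrinks to a single store of the consumer index into the shared doorbell record, which the device reads via DMA, instead of an MMIO ring; the CQC/QPC hunks then tell the hardware where that record lives. The 64-bit DMA address is split: since the record is 4-byte aligned, address bit 0 is always zero, so bit 0 of the low context word is reused as the enable flag while addr[31:1] occupies the 31-bit DB_RECORD_ADDR field, and addr[63:32] gets its own word. A sketch of that packing; the helper names are illustrative only:

    #include <linux/types.h>

    /* Pack a doorbell-record DMA address the way the context hunks above do. */
    static void encode_db_record_addr(u64 dma, bool en, u32 *lo, u32 *hi)
    {
            /* addr[31:1] in bits 31:1, enable flag in bit 0 */
            *lo = ((((u32)dma) >> 1) << 1) | (en ? 1 : 0);
            /* addr[63:32] in its own word */
            *hi = (u32)(dma >> 32);
    }

    static u64 decode_db_record_addr(u32 lo, u32 hi)
    {
            return ((u64)hi << 32) | (lo & ~1u);
    }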
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 2bf8a47e3de3..182b6726f783 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -299,6 +299,9 @@ struct hns_roce_v2_cq_context {
#define V2_CQC_BYTE_44_DB_RECORD_EN_S 0
+#define V2_CQC_BYTE_44_DB_RECORD_ADDR_S 1
+#define V2_CQC_BYTE_44_DB_RECORD_ADDR_M GENMASK(31, 1)
+
#define V2_CQC_BYTE_52_CQE_CNT_S 0
#define V2_CQC_BYTE_52_CQE_CNT_M GENMASK(23, 0)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index eb9a69fc7bec..9d48bc07a9e6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -74,12 +74,11 @@ static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
-static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
- unsigned int index, const union ib_gid *gid,
+static int hns_roce_add_gid(const union ib_gid *gid,
const struct ib_gid_attr *attr, void **context)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(device);
- u8 port = port_num - 1;
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ u8 port = attr->port_num - 1;
unsigned long flags;
int ret;
@@ -88,20 +87,20 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid,
- attr);
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index,
+ (union ib_gid *)gid, attr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
return ret;
}
-static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
- unsigned int index, void **context)
+static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ struct ib_gid_attr zattr = { };
union ib_gid zgid = { {0} };
- u8 port = port_num - 1;
+ u8 port = attr->port_num - 1;
unsigned long flags;
int ret;
@@ -110,7 +109,7 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
- ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, NULL);
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
@@ -295,12 +294,6 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
return IB_LINK_LAYER_ETHERNET;
}
-static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
- union ib_gid *gid)
-{
- return 0;
-}
-
static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
u16 *pkey)
{
@@ -337,7 +330,7 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
{
int ret = 0;
struct hns_roce_ucontext *context;
- struct hns_roce_ib_alloc_ucontext_resp resp;
+ struct hns_roce_ib_alloc_ucontext_resp resp = {};
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
resp.qp_tab_size = hr_dev->caps.num_qps;
@@ -350,6 +343,11 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (ret)
goto error_fail_uar_alloc;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ INIT_LIST_HEAD(&context->page_list);
+ mutex_init(&context->page_mutex);
+ }
+
ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (ret)
goto error_fail_copy_to_udata;
@@ -476,7 +474,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->modify_port = hns_roce_modify_port;
ib_dev->get_link_layer = hns_roce_get_link_layer;
ib_dev->get_netdev = hns_roce_get_netdev;
- ib_dev->query_gid = hns_roce_query_gid;
ib_dev->add_gid = hns_roce_add_gid;
ib_dev->del_gid = hns_roce_del_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
@@ -520,6 +517,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
+ ib_dev->driver_id = RDMA_DRIVER_HNS;
ret = ib_register_device(ib_dev, NULL);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
@@ -659,6 +657,11 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
spin_lock_init(&hr_dev->sm_lock);
spin_lock_init(&hr_dev->bt_cmd_lock);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ INIT_LIST_HEAD(&hr_dev->pgdir_list);
+ mutex_init(&hr_dev->pgdir_mutex);
+ }
+
ret = hns_roce_init_uar_table(hr_dev);
if (ret) {
dev_err(dev, "Failed to initialize uar table. aborting\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index da86a8117bd5..f7256d88d38f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -933,7 +933,7 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
out:
- free_page((unsigned long) pages);
+ free_pages((unsigned long) pages, order);
return ret;
}
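[Editor's note] The one-line mr.c change fixes an allocation/deallocation mismatch: the pages buffer is obtained at order > 0, so releasing it with free_page() (implicitly order 0) leaked the remaining pages of the run. The general rule, sketched below under the assumption of an order-2 allocation, is that __get_free_pages() and free_pages() must agree on the order:

    #include <linux/gfp.h>
    #include <linux/errno.h>

    static int use_page_run(void)
    {
            int order = 2;  /* 2^2 = 4 contiguous pages */
            u64 *pages;

            pages = (u64 *)__get_free_pages(GFP_KERNEL, order);
            if (!pages)
                    return -ENOMEM;

            /* ... fill and use the buffer ... */

            /* Free with the same order it was allocated with; free_page()
             * here would release only the first of the four pages.
             */
            free_pages((unsigned long)pages, order);
            return 0;
    }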
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index bdab2188c04a..4b41e041799c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/pci.h>
+#include <uapi/rdma/hns-abi.h>
#include "hns_roce_device.h"
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
@@ -77,7 +78,9 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
}
if (context) {
- if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
+ struct hns_roce_ib_alloc_pd_resp uresp = {.pdn = pd->pdn};
+
+ if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
kfree(pd);
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 088973a05882..e289a924e789 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -489,6 +489,15 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
return 0;
}
+static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
+{
+ if (attr->qp_type == IB_QPT_XRC_INI ||
+ attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
+ return 0;
+
+ return 1;
+}
+
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
struct ib_pd *ib_pd,
struct ib_qp_init_attr *init_attr,
@@ -497,6 +506,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
{
struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_qp ucmd;
+ struct hns_roce_ib_create_qp_resp resp = {};
unsigned long qpn = 0;
int ret = 0;
u32 page_shift;
@@ -602,6 +612,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
dev_err(dev, "hns_roce_ib_umem_write_mtt error for create qp\n");
goto err_mtt;
}
+
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ (udata->outlen >= sizeof(resp)) &&
+ hns_roce_qp_has_rq(init_attr)) {
+ ret = hns_roce_db_map_user(
+ to_hr_ucontext(ib_pd->uobject->context),
+ ucmd.db_addr, &hr_qp->rdb);
+ if (ret) {
+ dev_err(dev, "rp record doorbell map failed!\n");
+ goto err_mtt;
+ }
+ }
} else {
if (init_attr->create_flags &
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
@@ -630,6 +652,16 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
DB_REG_OFFSET * hr_dev->priv_uar.index;
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ hns_roce_qp_has_rq(init_attr)) {
+ ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
+ if (ret) {
+ dev_err(dev, "rq record doorbell alloc failed!\n");
+ goto err_rq_sge_list;
+ }
+ *hr_qp->rdb.db_record = 0;
+ }
+
/* Allocate QP buf */
page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
@@ -637,7 +669,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
&hr_qp->hr_buf, page_shift)) {
dev_err(dev, "hns_roce_buf_alloc error!\n");
ret = -ENOMEM;
- goto err_rq_sge_list;
+ goto err_db;
}
hr_qp->mtt.mtt_type = MTT_TYPE_WQE;
@@ -698,17 +730,44 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
else
hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
+ if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
+ (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
+
+ /* indicate kernel supports record db */
+ resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
+ ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (ret)
+ goto err_qp;
+
+ hr_qp->rdb_en = 1;
+ }
hr_qp->event = hns_roce_ib_qp_event;
return 0;
+err_qp:
+ if (init_attr->qp_type == IB_QPT_GSI &&
+ hr_dev->hw_rev == HNS_ROCE_HW_VER1)
+ hns_roce_qp_remove(hr_dev, hr_qp);
+ else
+ hns_roce_qp_free(hr_dev, hr_qp);
+
err_qpn:
if (!sqpn)
hns_roce_release_range_qp(hr_dev, qpn, 1);
err_wrid:
- kfree(hr_qp->sq.wrid);
- kfree(hr_qp->rq.wrid);
+ if (ib_pd->uobject) {
+ if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ (udata->outlen >= sizeof(resp)) &&
+ hns_roce_qp_has_rq(init_attr))
+ hns_roce_db_unmap_user(
+ to_hr_ucontext(ib_pd->uobject->context),
+ &hr_qp->rdb);
+ } else {
+ kfree(hr_qp->sq.wrid);
+ kfree(hr_qp->rq.wrid);
+ }
err_mtt:
hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
@@ -719,6 +778,11 @@ err_buf:
else
hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
+err_db:
+ if (!ib_pd->uobject && hns_roce_qp_has_rq(init_attr) &&
+ (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
+ hns_roce_free_db(hr_dev, &hr_qp->rdb);
+
err_rq_sge_list:
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
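[Editor's note] The reworked error path in hns_roce_create_qp_common() follows the standard kernel goto ladder: each acquired resource gets a label, labels sit in reverse acquisition order, and a failure jumps to the label that unwinds everything acquired so far; that is why inserting the doorbell allocation retargets the buffer-allocation failure from err_rq_sge_list to the new err_db. A schematic of the idiom, with placeholder acquire/release stubs so it compiles:

    /* Stubs so the schematic compiles; real code acquires actual resources. */
    static int acquire_buf(void) { return 0; }
    static int acquire_db(void)  { return 0; }
    static int acquire_qpn(void) { return 0; }
    static void release_db(void)  { }
    static void release_buf(void) { }

    static int create_object(void)
    {
            int ret;

            ret = acquire_buf();            /* oldest resource */
            if (ret)
                    return ret;

            ret = acquire_db();             /* the newly inserted step */
            if (ret)
                    goto err_buf;

            ret = acquire_qpn();
            if (ret)
                    goto err_db;            /* later failures unwind the db too */

            return 0;

    err_db:
            release_db();
    err_buf:
            release_buf();
            return ret;
    }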