author		Yangyang Li <liyangyang20@huawei.com>	2023-09-26 16:00:26 +0300
committer	Leon Romanovsky <leon@kernel.org>	2023-10-02 11:47:08 +0300
commit		c9813b0b9992eab01a47ad8d10f0add13f898692 (patch)
tree		e09e97292ca45a57782417aa21a5a105948455fe /drivers/infiniband
parent		8dc0fd2f5693ab71f84fb16339c483999a909ab0 (diff)
download	linux-c9813b0b9992eab01a47ad8d10f0add13f898692.tar.xz
RDMA/hns: Support SRQ record doorbell
Compared with the normal doorbell, using the record doorbell can shorten the
process of ringing the doorbell and reduce latency.
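For illustration only (not part of this patch), the following user-space C model contrasts the two schemes: a normal doorbell pushes a full doorbell word to a device register over MMIO on every post, while a record doorbell only stores the latest producer index into a small DMA buffer that the hardware reads. All names here (fake_mmio_reg, db_record, the simplified doorbell layout and mask) are invented for this sketch; the real driver paths are update_srq_db() and *srq->rdb.db_record in the diff further down.

/*
 * Conceptual model only, not driver code.  The doorbell word layout and the
 * producer-index mask are simplified stand-ins for the real register fields.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint64_t fake_mmio_reg; /* stands in for the SRQ doorbell register */
static volatile uint32_t db_record;     /* stands in for *srq->rdb.db_record */

/* Normal doorbell: build the whole doorbell word and write it over MMIO. */
static void ring_normal_doorbell(uint32_t srqn, uint32_t producer_idx)
{
	fake_mmio_reg = ((uint64_t)srqn << 32) | producer_idx;
}

/* Record doorbell: just store the new producer index in host memory. */
static void ring_record_doorbell(uint32_t producer_idx)
{
	db_record = producer_idx & 0xffffu;
}

int main(void)
{
	ring_normal_doorbell(0x12, 5);
	ring_record_doorbell(6);
	printf("mmio=0x%llx record=%u\n",
	       (unsigned long long)fake_mmio_reg, (unsigned)db_record);
	return 0;
}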
Add a flag HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB to allow the FW to
enable/disable the SRQ record doorbell.
If the flag above is set, allocate a DMA buffer for the SRQ record
doorbell and write the buffer address into the SRQC during SRQ creation.
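Note that the low half of the record doorbell's DMA address is written into SRQC shifted right by one bit (see the hns_roce_v2_write_srqc() hunk below), presumably because the buffer is at least 2-byte aligned so bit 0 carries no information. A minimal stand-alone illustration of that split, with an example address of my own choosing:

/* Illustration only: splits a 64-bit DMA address the way the SRQC hunk below
 * does (low 32 bits shifted right by 1, high 32 bits as-is). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x0000004512345678ULL;                     /* example bus address */
	uint32_t addr_l = (uint32_t)(dma & 0xffffffffULL) >> 1;   /* -> SRQC_DB_RECORD_ADDR_L */
	uint32_t addr_h = (uint32_t)(dma >> 32);                  /* -> SRQC_DB_RECORD_ADDR_H */

	/* The hardware can rebuild the address as (addr_h << 32) | (addr_l << 1). */
	printf("addr_l=0x%x addr_h=0x%x\n", addr_l, addr_h);
	return 0;
}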
For userspace SRQs, add a flag HNS_ROCE_RSP_SRQ_CAP_RECORD_DB to notify
userspace whether the SRQ record doorbell is enabled.
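A hypothetical sketch of the resulting userspace handshake (not the actual rdma-core/libhns code; the struct layouts and bit value below merely mirror the kernel-side names in this patch): the provider requests the feature in the create command, passes its doorbell record address, and only uses the record doorbell if the kernel's response confirms it.

/* Hypothetical ABI mirror for illustration; the real definitions live in the
 * hns uapi header, which is outside this diffstat (limited to
 * drivers/infiniband). */
#include <stdint.h>
#include <stdio.h>

#define SRQ_CAP_RECORD_DB	(1U << 0)	/* assumed bit for both request and response */

struct create_srq_cmd {			/* models hns_roce_ib_create_srq */
	uint64_t db_addr;		/* userspace doorbell record address */
	uint32_t req_cap_flags;		/* record doorbell requested */
};

struct create_srq_resp {		/* models hns_roce_ib_create_srq_resp */
	uint32_t srqn;
	uint32_t cap_flags;		/* record doorbell granted */
};

int main(void)
{
	struct create_srq_cmd cmd = { .db_addr = 0x7f0000001000ULL,
				      .req_cap_flags = SRQ_CAP_RECORD_DB };
	/* Pretend the kernel granted the request. */
	struct create_srq_resp resp = { .srqn = 7, .cap_flags = SRQ_CAP_RECORD_DB };

	if (resp.cap_flags & SRQ_CAP_RECORD_DB)
		printf("SRQ %u: update record at 0x%llx instead of ringing MMIO\n",
		       resp.srqn, (unsigned long long)cmd.db_addr);
	else
		printf("SRQ %u: fall back to the hardware doorbell\n", resp.srqn);
	return 0;
}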
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
Link: https://lore.kernel.org/r/20230926130026.583088-1-huangjunxian6@hisilicon.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_device.h	|  3
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_hw_v2.c	| 31
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_srq.c	| 85
3 files changed, 108 insertions, 11 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 2059d90f8f78..1627f3b0ef28 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -146,6 +146,7 @@ enum {
 	HNS_ROCE_CAP_FLAG_SDI_MODE		= BIT(14),
 	HNS_ROCE_CAP_FLAG_STASH			= BIT(17),
 	HNS_ROCE_CAP_FLAG_CQE_INLINE		= BIT(19),
+	HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB		= BIT(22),
 };
 
 #define HNS_ROCE_DB_TYPE_COUNT			2
@@ -453,6 +454,8 @@ struct hns_roce_srq {
 	spinlock_t		lock;
 	struct mutex		mutex;
 	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
+	struct hns_roce_db	rdb;
+	u32			cap_flags;
 };
 
 struct hns_roce_uar_table {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b48f2c7b2fb0..a17bbf29b64b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -941,20 +941,23 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx)
 	idx_que->head++;
 }
 
-static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq)
+static void update_srq_db(struct hns_roce_srq *srq)
 {
-	hr_reg_write(db, DB_TAG, srq->srqn);
-	hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
-	hr_reg_write(db, DB_PI, srq->idx_que.head);
+	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
+	struct hns_roce_v2_db db;
+
+	hr_reg_write(&db, DB_TAG, srq->srqn);
+	hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB);
+	hr_reg_write(&db, DB_PI, srq->idx_que.head);
+
+	hns_roce_write64(hr_dev, (__le32 *)&db, srq->db_reg);
 }
 
 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 				     const struct ib_recv_wr *wr,
 				     const struct ib_recv_wr **bad_wr)
 {
-	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
-	struct hns_roce_v2_db srq_db;
 	unsigned long flags;
 	int ret = 0;
 	u32 max_sge;
@@ -985,9 +988,11 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 	}
 
 	if (likely(nreq)) {
-		update_srq_db(&srq_db, srq);
-
-		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
+		if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)
+			*srq->rdb.db_record = srq->idx_que.head &
+					      V2_DB_PRODUCER_IDX_M;
+		else
+			update_srq_db(srq);
 	}
 
 	spin_unlock_irqrestore(&srq->lock, flags);
@@ -5630,6 +5635,14 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
 	hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
 		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
 
+	if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) {
+		hr_reg_enable(ctx, SRQC_DB_RECORD_EN);
+		hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_L,
+			     lower_32_bits(srq->rdb.dma) >> 1);
+		hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_H,
+			     upper_32_bits(srq->rdb.dma));
+	}
+
 	return hns_roce_v2_write_srqc_index_queue(srq, ctx);
 }
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 8dae98f827eb..4e2d1c8e164a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -5,6 +5,7 @@
 
 #include <linux/pci.h>
 #include <rdma/ib_umem.h>
+#include <rdma/uverbs_ioctl.h>
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
@@ -387,6 +388,79 @@ static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 	free_srq_idx(hr_dev, srq);
 }
 
+static int get_srq_ucmd(struct hns_roce_srq *srq, struct ib_udata *udata,
+			struct hns_roce_ib_create_srq *ucmd)
+{
+	struct ib_device *ibdev = srq->ibsrq.device;
+	int ret;
+
+	ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
+	if (ret) {
+		ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+			struct ib_udata *udata)
+{
+	struct hns_roce_ucontext *uctx;
+
+	if (!(srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB))
+		return;
+
+	srq->cap_flags &= ~HNS_ROCE_SRQ_CAP_RECORD_DB;
+	if (udata) {
+		uctx = rdma_udata_to_drv_context(udata,
+						 struct hns_roce_ucontext,
+						 ibucontext);
+		hns_roce_db_unmap_user(uctx, &srq->rdb);
+	} else {
+		hns_roce_free_db(hr_dev, &srq->rdb);
+	}
+}
+
+static int alloc_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+			struct ib_udata *udata,
+			struct hns_roce_ib_create_srq_resp *resp)
+{
+	struct hns_roce_ib_create_srq ucmd = {};
+	struct hns_roce_ucontext *uctx;
+	int ret;
+
+	if (udata) {
+		ret = get_srq_ucmd(srq, udata, &ucmd);
+		if (ret)
+			return ret;
+
+		if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) &&
+		    (ucmd.req_cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)) {
+			uctx = rdma_udata_to_drv_context(udata,
+					struct hns_roce_ucontext, ibucontext);
+			ret = hns_roce_db_map_user(uctx, ucmd.db_addr,
+						   &srq->rdb);
+			if (ret)
+				return ret;
+
+			srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
+		}
+	} else {
+		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) {
+			ret = hns_roce_alloc_db(hr_dev, &srq->rdb, 1);
+			if (ret)
+				return ret;
+
+			*srq->rdb.db_record = 0;
+			srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB;
+		}
+		srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
+	}
+
+	return 0;
+}
+
 int hns_roce_create_srq(struct ib_srq *ib_srq,
 			struct ib_srq_init_attr *init_attr,
 			struct ib_udata *udata)
@@ -407,15 +481,20 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 	if (ret)
 		return ret;
 
-	ret = alloc_srqn(hr_dev, srq);
+	ret = alloc_srq_db(hr_dev, srq, udata, &resp);
 	if (ret)
 		goto err_srq_buf;
 
+	ret = alloc_srqn(hr_dev, srq);
+	if (ret)
+		goto err_srq_db;
+
 	ret = alloc_srqc(hr_dev, srq);
 	if (ret)
 		goto err_srqn;
 
 	if (udata) {
+		resp.cap_flags = srq->cap_flags;
 		resp.srqn = srq->srqn;
 		if (ib_copy_to_udata(udata, &resp,
 				     min(udata->outlen, sizeof(resp)))) {
@@ -424,7 +503,6 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 		}
 	}
 
-	srq->db_reg = hr_dev->reg_base + SRQ_DB_REG;
 	srq->event = hns_roce_ib_srq_event;
 	refcount_set(&srq->refcount, 1);
 	init_completion(&srq->free);
@@ -435,6 +513,8 @@ err_srqc:
 	free_srqc(hr_dev, srq);
 err_srqn:
 	free_srqn(hr_dev, srq);
+err_srq_db:
+	free_srq_db(hr_dev, srq, udata);
 err_srq_buf:
 	free_srq_buf(hr_dev, srq);
 
@@ -448,6 +528,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 
 	free_srqc(hr_dev, srq);
 	free_srqn(hr_dev, srq);
+	free_srq_db(hr_dev, srq, udata);
 	free_srq_buf(hr_dev, srq);
 	return 0;
 }