author		Shaobo Xu <xushaobo2@huawei.com>	2017-08-30 12:23:09 +0300
committer	Doug Ledford <dledford@redhat.com>	2017-09-27 15:34:56 +0300
commit		9766edc34ea17a8264b76696367aeb88a52ab108 (patch)
tree		96a9fb63f353e536d50bf7d7e56c0ff0220e8967 /drivers/infiniband/hw/hns/hns_roce_mr.c
parent		6a93c77afe088225363f6941a29fff415b1f7172 (diff)
download	linux-9766edc34ea17a8264b76696367aeb88a52ab108.tar.xz
RDMA/hns: Split CQE from MTT in hip08
In hip08, the SQWQE/SGE/RQWQE and the CQE use different hop numbers and
page sizes, so the base address tables of the SQWQE/SGE/RQWQE and of the
CQE must be managed separately.
This patch splits the CQE from the MTT (SQWQE/SGE/RQWQE), giving each its
own buddy allocator and HEM table.
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
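
For context, a minimal caller-side sketch of how this split is consumed. This is an assumption based on the mtt_type dispatch added below, not code from this diff: the helper name example_init_cqe_mtt is hypothetical, and MTT_TYPE_CQE is assumed to be the counterpart of the MTT_TYPE_WQE value used in hns_roce_alloc_mtt_range().

static int example_init_cqe_mtt(struct hns_roce_dev *hr_dev,
				struct hns_roce_buf *buf,
				struct hns_roce_mtt *mtt)
{
	/* Tag the MTT as CQE-backed so hns_roce_alloc_mtt_range()
	 * selects mtt_cqe_buddy/mtt_cqe_table instead of the WQE pair.
	 */
	mtt->mtt_type = MTT_TYPE_CQE;	/* assumed counterpart of MTT_TYPE_WQE */
	return hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
}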
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_mr.c')
-rw-r--r--	drivers/infiniband/hw/hns/hns_roce_mr.c	66
1 file changed, 51 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index d8f7b41148d1..88b436fb92ee 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -177,18 +177,28 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
 }
 
 static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
-				    unsigned long *seg)
+				    unsigned long *seg, u32 mtt_type)
 {
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	struct hns_roce_hem_table *table;
+	struct hns_roce_buddy *buddy;
+	int ret;
+
+	if (mtt_type == MTT_TYPE_WQE) {
+		buddy = &mr_table->mtt_buddy;
+		table = &mr_table->mtt_table;
+	} else {
+		buddy = &mr_table->mtt_cqe_buddy;
+		table = &mr_table->mtt_cqe_table;
+	}
 
-	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
+	ret = hns_roce_buddy_alloc(buddy, order, seg);
 	if (ret == -1)
 		return -1;
 
-	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
+	if (hns_roce_table_get_range(hr_dev, table, *seg,
 				     *seg + (1 << order) - 1)) {
-		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
+		hns_roce_buddy_free(buddy, *seg, order);
 		return -1;
 	}
 
@@ -198,7 +208,7 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
 int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
 		      struct hns_roce_mtt *mtt)
 {
-	int ret = 0;
+	int ret;
 	int i;
 
 	/* Page num is zero, correspond to DMA memory register */
@@ -217,7 +227,8 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
 		++mtt->order;
 
 	/* Allocate MTT entry */
-	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
+	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
+				       mtt->mtt_type);
 	if (ret == -1)
 		return -ENOMEM;
 
@@ -231,9 +242,19 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
 	if (mtt->order < 0)
 		return;
 
-	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
-	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
-				 mtt->first_seg + (1 << mtt->order) - 1);
+	if (mtt->mtt_type == MTT_TYPE_WQE) {
+		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
+					 mtt->first_seg,
+					 mtt->first_seg + (1 << mtt->order) - 1);
+	} else {
+		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
+				    mtt->order);
+		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
+					 mtt->first_seg,
+					 mtt->first_seg + (1 << mtt->order) - 1);
+	}
 }
 EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);
 
@@ -362,7 +383,11 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
 	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
 		return -EINVAL;
 
-	table = &hr_dev->mr_table.mtt_table;
+	if (mtt->mtt_type == MTT_TYPE_WQE)
+		table = &hr_dev->mr_table.mtt_table;
+	else
+		table = &hr_dev->mr_table.mtt_cqe_table;
+
 	mtts = hns_roce_table_find(hr_dev, table,
 				   mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
 				   &dma_handle);
@@ -409,9 +434,9 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
 int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt,
 			   struct hns_roce_buf *buf)
 {
-	u32 i = 0;
-	int ret = 0;
-	u64 *page_list = NULL;
+	u64 *page_list;
+	int ret;
+	u32 i;
 
 	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
 	if (!page_list)
@@ -434,7 +459,7 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
 int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
-	int ret = 0;
+	int ret;
 
 	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
 				   hr_dev->caps.num_mtpts,
@@ -448,8 +473,17 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
 	if (ret)
 		goto err_buddy;
 
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
+		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
+					  ilog2(hr_dev->caps.num_cqe_segs));
+		if (ret)
+			goto err_buddy_cqe;
+	}
 	return 0;
 
+err_buddy_cqe:
+	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+
 err_buddy:
 	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
 
 	return ret;
@@ -460,6 +494,8 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
 
 	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
+	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
+		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
 	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
 }
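
A note on the range arithmetic above: an order-n buddy allocation returns the first of 2^n contiguous MTT segments, which is why both hns_roce_table_get_range() and hns_roce_table_put_range() span [first_seg, first_seg + (1 << order) - 1]. A standalone toy illustration (plain C, not driver code):

#include <stdio.h>

/* Toy model: print the HEM range covered by an order-3 buddy
 * allocation whose first segment is 8 (2^3 = 8 segments total).
 */
int main(void)
{
	unsigned long first_seg = 8;	/* as returned through *seg */
	int order = 3;

	printf("HEM range: [%lu, %lu]\n",
	       first_seg, first_seg + (1UL << order) - 1);
	return 0;
}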