author     Jason Gunthorpe <jgg@nvidia.com>  2021-08-30 15:49:59 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>  2021-08-30 15:49:59 +0300
commit     6a217437f9f5482a3f6f2dc5fcd27cf0f62409ac (patch)
tree       c82270181daeb43eb9984b586784b70f13ef1df4 /drivers/infiniband/hw
parent     65f90c8e38c9900692338864156e7824cf8a6501 (diff)
parent     79fbd3e1241cea83dded06db2b8bcd5893d877d7 (diff)
download   linux-6a217437f9f5482a3f6f2dc5fcd27cf0f62409ac.tar.xz
Merge branch 'sg_nents' into rdma.git for-next
From Maor Gottlieb
====================

Fix the use of nents and orig_nents in the sg table append helpers. The
nents should be used by the DMA layer to store the number of DMA mapped
sges; the orig_nents is the number of CPU sges.

Since the sg append logic doesn't always create a SGL with exactly
orig_nents entries, store a total_nents as well to allow the table to be
properly freed, and reorganize the freeing logic to share across all the
use cases.

====================

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

* 'sg_nents':
  RDMA: Use the sg_table directly and remove the opencoded version from umem
  lib/scatterlist: Fix wrong update of orig_nents
  lib/scatterlist: Provide a dedicated function to support table append
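The rule this series enforces can be illustrated with the generic scatterlist
and DMA-mapping helpers. The sketch below is not code from the patch; it only
shows why the two counts differ: allocation sets orig_nents (CPU entries),
while mapping may merge entries and sets nents (DMA segments), so DMA-side
walkers must use the nents-based iterator.

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>
    #include <linux/printk.h>
    #include <linux/scatterlist.h>

    /*
     * Illustrative sketch only (not part of this series): why nents and
     * orig_nents must not be conflated.  sg_alloc_table_from_pages() sets
     * orig_nents to the number of CPU entries; dma_map_sgtable() may merge
     * entries and sets nents to the (possibly smaller) DMA segment count.
     */
    static int demo_map_pages(struct device *dev, struct sg_table *sgt,
    			  struct page **pages, unsigned int npages)
    {
    	struct scatterlist *sg;
    	unsigned int i;
    	int ret;

    	ret = sg_alloc_table_from_pages(sgt, pages, npages, 0,
    					(unsigned long)npages * PAGE_SIZE,
    					GFP_KERNEL);
    	if (ret)
    		return ret;

    	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
    	if (ret) {
    		sg_free_table(sgt);
    		return ret;
    	}

    	/* DMA view: walks sgt->nents mapped segments */
    	for_each_sgtable_dma_sg(sgt, sg, i)
    		pr_debug("dma 0x%llx len %u\n",
    			 (unsigned long long)sg_dma_address(sg),
    			 sg_dma_len(sg));

    	/* CPU view: walks sgt->orig_nents page entries */
    	for_each_sgtable_sg(sgt, sg, i)
    		pr_debug("page %p len %u\n", sg_page(sg), sg->length);

    	return 0;
    }

Teardown mirrors this: dma_unmap_sgtable() followed by sg_free_table(). The
umem rework merged here shares that freeing path across its users, and the
appended tables additionally record a total_nents so a partially built table
can be freed correctly.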
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c       |  4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c  | 10
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.h  |  1
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c           | 12
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c           |  6
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h     |  3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cmd.c   |  7
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_db.c    |  4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c  |  4
-rw-r--r--  drivers/infiniband/hw/irdma/ctrl.c         | 13
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c           | 11
-rw-r--r--  drivers/infiniband/hw/irdma/main.c         |  9
-rw-r--r--  drivers/infiniband/hw/irdma/type.h         |  3
-rw-r--r--  drivers/infiniband/hw/irdma/uk.c           |  5
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c        |  8
-rw-r--r--  drivers/infiniband/hw/mlx4/doorbell.c      |  3
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c            |  4
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c            |  4
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c          |  3
-rw-r--r--  drivers/infiniband/hw/mlx5/doorbell.c      |  3
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c            |  7
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c         |  2
22 files changed, 58 insertions, 68 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 3edf66818e4b..b3baffe861a6 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -120,6 +120,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
if (!chip_ctx)
return -ENOMEM;
chip_ctx->chip_num = bp->chip_num;
+ chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
rdev->chip_ctx = chip_ctx;
/* rest members to follow eventually */
@@ -550,6 +551,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
dma_addr_t dma_map,
u32 *fw_stats_ctx_id)
{
+ struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
struct hwrm_stat_ctx_alloc_output resp = {0};
struct hwrm_stat_ctx_alloc_input req = {0};
struct bnxt_en_dev *en_dev = rdev->en_dev;
@@ -566,7 +568,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
req.stats_dma_addr = cpu_to_le64(dma_map);
- req.stats_dma_length = cpu_to_le16(sizeof(struct ctx_hw_stats_ext));
+ req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 17f0701b3cee..44282a8cdd4f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -56,6 +56,7 @@
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
struct bnxt_qplib_stats *stats);
/* PBL */
@@ -559,7 +560,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
goto fail;
stats_alloc:
/* Stats */
- rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
if (rc)
goto fail;
@@ -889,15 +890,12 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
}
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
struct bnxt_qplib_stats *stats)
{
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
- /* 128 byte aligned context memory is required only for 57500.
- * However making this unconditional, it does not harm previous
- * generation.
- */
- stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
+ stats->size = cctx->hw_stats_size;
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
&stats->dma_map, GFP_KERNEL);
if (!stats->dma) {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index c291f495ae91..91031502e8f5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -54,6 +54,7 @@ struct bnxt_qplib_chip_ctx {
u16 chip_num;
u8 chip_rev;
u8 chip_metal;
+ u16 hw_stats_size;
struct bnxt_qplib_drv_modes modes;
};
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 6c8c910f4e86..c7e8d7b3baa1 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -967,6 +967,12 @@ int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
return !err || err == -ENODATA ? npolled : err;
}
+void c4iw_cq_rem_ref(struct c4iw_cq *chp)
+{
+ if (refcount_dec_and_test(&chp->refcnt))
+ complete(&chp->cq_rel_comp);
+}
+
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
struct c4iw_cq *chp;
@@ -976,8 +982,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
chp = to_c4iw_cq(ib_cq);
xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
- refcount_dec(&chp->refcnt);
- wait_event(chp->wait, !refcount_read(&chp->refcnt));
+ c4iw_cq_rem_ref(chp);
+ wait_for_completion(&chp->cq_rel_comp);
ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
ibucontext);
@@ -1081,7 +1087,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
spin_lock_init(&chp->lock);
spin_lock_init(&chp->comp_handler_lock);
refcount_set(&chp->refcnt, 1);
- init_waitqueue_head(&chp->wait);
+ init_completion(&chp->cq_rel_comp);
ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
if (ret)
goto err_destroy_cq;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 7798d090888b..34211a533d5c 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -213,8 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
break;
}
done:
- if (refcount_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
+ c4iw_cq_rem_ref(chp);
c4iw_qp_rem_ref(&qhp->ibqp);
out:
return;
@@ -234,8 +233,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
- if (refcount_dec_and_test(&chp->refcnt))
- wake_up(&chp->wait);
+ c4iw_cq_rem_ref(chp);
} else {
pr_debug("unknown cqid 0x%x\n", qid);
xa_unlock_irqrestore(&dev->cqs, flag);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 6a2a415ec791..12f33467c672 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -428,7 +428,7 @@ struct c4iw_cq {
spinlock_t lock;
spinlock_t comp_handler_lock;
refcount_t refcnt;
- wait_queue_head_t wait;
+ struct completion cq_rel_comp;
struct c4iw_wr_wait *wr_waitp;
};
@@ -979,6 +979,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+void c4iw_cq_rem_ref(struct c4iw_cq *chp);
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 8f68cc3ff193..84f3f2b5f097 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -213,8 +213,10 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
hr_cmd->context =
kcalloc(hr_cmd->max_cmds, sizeof(*hr_cmd->context), GFP_KERNEL);
- if (!hr_cmd->context)
+ if (!hr_cmd->context) {
+ hr_dev->cmd_mod = 0;
return -ENOMEM;
+ }
for (i = 0; i < hr_cmd->max_cmds; ++i) {
hr_cmd->context[i].token = i;
@@ -228,7 +230,6 @@ int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev)
spin_lock_init(&hr_cmd->context_lock);
hr_cmd->use_events = 1;
- down(&hr_cmd->poll_sem);
return 0;
}
@@ -239,8 +240,6 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev)
kfree(hr_cmd->context);
hr_cmd->use_events = 0;
-
- up(&hr_cmd->poll_sem);
}
struct hns_roce_cmd_mailbox *
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index d40ea3d87260..751470c7a2ce 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -42,8 +42,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
found:
offset = virt - page_addr;
- db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
- db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
+ db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
db->u.user_page = page;
refcount_inc(&page->refcount);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b40913485699..5d39bd08582a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -859,11 +859,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (hr_dev->cmd_mod) {
ret = hns_roce_cmd_use_events(hr_dev);
- if (ret) {
+ if (ret)
dev_warn(dev,
"Cmd event mode failed, set back to poll!\n");
- hns_roce_cmd_use_polling(hr_dev);
- }
}
ret = hns_roce_init_hem(hr_dev);
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index b1023a7d0bd1..f1e5515256e0 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -2845,7 +2845,7 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
* parses fpm commit info and copy base value
* of hmc objects in hmc_info
*/
-static enum irdma_status_code
+static void
irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
struct irdma_hmc_obj_info *info, u32 *sd)
{
@@ -2915,7 +2915,6 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
else
*sd = (u32)(size >> 21);
- return 0;
}
/**
@@ -4187,11 +4186,9 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
* @dev: sc device struct
* @count: allocate count
*/
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
{
writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
-
- return 0;
}
/**
@@ -4434,9 +4431,9 @@ static enum irdma_status_code irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev,
ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
&commit_fpm_mem, true, wait_type);
if (!ret_code)
- ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
- hmc_info->hmc_obj,
- &hmc_info->sd_table.sd_cnt);
+ irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+ hmc_info->hmc_obj,
+ &hmc_info->sd_table.sd_cnt);
print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
false);
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 7afb8a6a0526..00de5ee9a260 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -1920,7 +1920,7 @@ enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf)
* irdma_set_hw_rsrc - set hw memory resources.
* @rf: RDMA PCI function
*/
-static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
{
rf->allocated_qps = (void *)(rf->mem_rsrc +
(sizeof(struct irdma_arp_entry) * rf->arp_table_size));
@@ -1937,8 +1937,6 @@ static u32 irdma_set_hw_rsrc(struct irdma_pci_f *rf)
spin_lock_init(&rf->arp_lock);
spin_lock_init(&rf->qptable_lock);
spin_lock_init(&rf->qh_list_lock);
-
- return 0;
}
/**
@@ -2000,9 +1998,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
- ret = irdma_set_hw_rsrc(rf);
- if (ret)
- goto set_hw_rsrc_fail;
+ irdma_set_hw_rsrc(rf);
set_bit(0, rf->allocated_mrs);
set_bit(0, rf->allocated_qps);
@@ -2025,9 +2021,6 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
return 0;
-set_hw_rsrc_fail:
- kfree(rf->mem_rsrc);
- rf->mem_rsrc = NULL;
mem_rsrc_kzalloc_fail:
kfree(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index ea59432351fb..51a41359e0b4 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -215,10 +215,10 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
}
-static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf)
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
+ struct ice_vsi *vsi)
{
struct irdma_pci_f *rf = iwdev->rf;
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
rf->cdev = pf;
rf->gen_ops.register_qset = irdma_lan_register_qset;
@@ -253,12 +253,15 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
struct iidc_auxiliary_dev,
adev);
struct ice_pf *pf = iidc_adev->pf;
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct iidc_qos_params qos_info = {};
struct irdma_device *iwdev;
struct irdma_pci_f *rf;
struct irdma_l2params l2params = {};
int err;
+ if (!vsi)
+ return -EIO;
iwdev = ib_alloc_device(irdma_device, ibdev);
if (!iwdev)
return -ENOMEM;
@@ -268,7 +271,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
return -ENOMEM;
}
- irdma_fill_device_info(iwdev, pf);
+ irdma_fill_device_info(iwdev, pf, vsi);
rf = iwdev->rf;
if (irdma_ctrl_init_hw(rf)) {
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 7387b83e826d..874bc25a938b 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -1222,8 +1222,7 @@ enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
struct irdma_aeq_init_info *info);
enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
struct irdma_aeqe_info *info);
-enum irdma_status_code irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev,
- u32 count);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
int abi_ver);
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index a6d52c20091c..5fb92de1f015 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -931,7 +931,7 @@ enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
struct irdma_post_rq_info *info)
{
- u32 total_size = 0, wqe_idx, i, byte_off;
+ u32 wqe_idx, i, byte_off;
u32 addl_frag_cnt;
__le64 *wqe;
u64 hdr;
@@ -939,9 +939,6 @@ enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
if (qp->max_rq_frag_cnt < info->num_sges)
return IRDMA_ERR_INVALID_FRAG_COUNT;
- for (i = 0; i < info->num_sges; i++)
- total_size += info->sg_list[i].len;
-
wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
if (!wqe)
return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9b8c451e2426..4fc323402073 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -557,7 +557,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
* @iwqp: qp ptr
* @init_info: initialize info to return
*/
-static int irdma_setup_virt_qp(struct irdma_device *iwdev,
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *init_info)
{
@@ -574,8 +574,6 @@ static int irdma_setup_virt_qp(struct irdma_device *iwdev,
init_info->sq_pa = qpmr->sq_pbl.addr;
init_info->rq_pa = qpmr->rq_pbl.addr;
}
-
- return 0;
}
/**
@@ -909,7 +907,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
}
}
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
- err_code = irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+ irdma_setup_virt_qp(iwdev, iwqp, &init_info);
} else {
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
@@ -2231,7 +2229,7 @@ static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
- iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
+ iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
*pbl = rdma_block_iter_dma_address(&biter);
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index d41f03ccb0e1..9bbd695a9fd5 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -75,7 +75,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
list_add(&page->list, &context->db_page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 50becc0e4b62..04a67b481608 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -200,7 +200,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
mtt_shift = mtt->page_shift;
mtt_size = 1ULL << mtt_shift;
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
if (cur_start_addr + len == sg_dma_address(sg)) {
/* still the same block */
len += sg_dma_len(sg);
@@ -273,7 +273,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
- for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
+ for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
/*
* Initialization - save the first chunk start as the
* current_block_start - block means contiguous pages.
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 18b55d2eba40..a190fb581591 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -945,7 +945,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
u32 *cqb = NULL;
void *cqc;
int cqe_size;
- unsigned int irqn;
int eqn;
int err;
@@ -984,7 +983,7 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
- err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+ err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
if (err)
goto err_cqb;
@@ -1007,7 +1006,6 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_cqb;
mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
- cq->mcq.irqn = irqn;
if (udata)
cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
else
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index e994aefcc40f..e95967aefe78 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -975,7 +975,6 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
struct mlx5_ib_dev *dev;
int user_vector;
int dev_eqn;
- unsigned int irqn;
int err;
if (uverbs_copy_from(&user_vector, attrs,
@@ -987,7 +986,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
return PTR_ERR(c);
dev = to_mdev(c->ibucontext.device);
- err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
+ err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
if (err < 0)
return err;
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
index 9ca2e61807ec..6398e2f48579 100644
--- a/drivers/infiniband/hw/mlx5/doorbell.c
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -78,7 +78,8 @@ int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
list_add(&page->list, &context->db_page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (virt & ~PAGE_MASK);
db->u.user_page = page;
++page->refcnt;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3263851ea574..a520ac8ab68c 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -531,8 +531,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
*/
spin_unlock_irq(&ent->lock);
need_delay = need_resched() || someone_adding(cache) ||
- time_after(jiffies,
- READ_ONCE(cache->last_add) + 300 * HZ);
+ !time_after(jiffies,
+ READ_ONCE(cache->last_add) + 300 * HZ);
spin_lock_irq(&ent->lock);
if (ent->disabled)
goto out;
@@ -1226,7 +1226,8 @@ int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
orig_sg_length = sg.length;
cur_mtt = mtt;
- rdma_for_each_block (mr->umem->sg_head.sgl, &biter, mr->umem->nmap,
+ rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter,
+ mr->umem->sgt_append.sgt.nents,
BIT(mr->page_shift)) {
if (cur_mtt == (void *)mtt + sg.length) {
dma_sync_single_for_device(ddev, sg.addr, sg.length,
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 18caf2ab0cc1..3fbf172dbbef 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1495,7 +1495,7 @@ static int qedr_init_srq_user_params(struct ib_udata *udata,
return PTR_ERR(srq->prod_umem);
}
- sg = srq->prod_umem->sg_head.sgl;
+ sg = srq->prod_umem->sgt_append.sgt.sgl;
srq->hw_srq.phy_prod_pair_addr = sg_dma_address(sg);
return 0;