Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/umem_odp.c             |  4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c       |  8
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c           | 23
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c       | 30
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.h       |  2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_res.c      |  2
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.c      |  6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c     |  6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_restrack.c  |  9
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c            | 29
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c             |  2
11 files changed, 55 insertions, 66 deletions
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index b1c44ec1a3f3..572a91a62a7b 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -115,7 +115,7 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
 
 out_free_map:
 	if (ib_uses_virt_dma(dev))
-		kfree(map->pfn_list);
+		kvfree(map->pfn_list);
 	else
 		hmm_dma_map_free(dev->dma_device, map);
 	return ret;
@@ -287,7 +287,7 @@ static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
 	mutex_unlock(&umem_odp->umem_mutex);
 	mmu_interval_notifier_remove(&umem_odp->notifier);
 	if (ib_uses_virt_dma(dev))
-		kfree(umem_odp->map.pfn_list);
+		kvfree(umem_odp->map.pfn_list);
 	else
 		hmm_dma_map_free(dev->dma_device, &umem_odp->map);
 }
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 37c2bc3bdba5..260dc67b8b87 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1921,7 +1921,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
 	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
 					       ib_srq);
 	struct bnxt_re_dev *rdev = srq->rdev;
-	int rc;
 
 	switch (srq_attr_mask) {
 	case IB_SRQ_MAX_WR:
@@ -1933,11 +1932,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
 			return -EINVAL;
 
 		srq->qplib_srq.threshold = srq_attr->srq_limit;
-		rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
-		if (rc) {
-			ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
-			return rc;
-		}
+		bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
+
 		/* On success, update the shadow */
 		srq->srq_limit = srq_attr->srq_limit;
 		/* No need to Build and send response back to udata */
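The umem_odp hunks above pair kvfree() with pfn_list because that buffer is a kv* allocation, which may be vmalloc-backed when the mapping is large; passing a vmalloc-backed pointer to kfree() is invalid. A minimal kernel-style sketch of the pairing (the helper names are illustrative, not part of the patch):

/*
 * Illustrative sketch: kv* allocations must be released with kvfree(),
 * which handles both kmalloc- and vmalloc-backed buffers.
 */
#include <linux/slab.h>

static unsigned long *example_alloc_pfn_list(size_t npfns)
{
        /* may be kmalloc- or vmalloc-backed depending on npfns */
        return kvcalloc(npfns, sizeof(unsigned long), GFP_KERNEL);
}

static void example_free_pfn_list(unsigned long *pfn_list)
{
        kvfree(pfn_list);       /* safe for either backing; kfree() is not */
}

kvfree() also accepts ordinary kmalloc() pointers and NULL, so it is the safe default whenever the allocation side uses the kv* helpers.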
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 293b0a96c8e3..df7cf8d68e27 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -2017,6 +2017,28 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
 	rdev->nqr = NULL;
 }
 
+/* When DEL_GID fails, driver is not freeing GID ctx memory.
+ * To avoid the memory leak, free the memory during unload
+ */
+static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+	struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+	int i;
+
+	if (!sgid_tbl->active)
+		return;
+
+	ctx_tbl = sgid_tbl->ctx;
+	for (i = 0; i < sgid_tbl->max; i++) {
+		if (sgid_tbl->hw_id[i] == 0xFFFF)
+			continue;
+
+		ctx = ctx_tbl[i];
+		kfree(ctx);
+	}
+}
+
 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
 {
 	u8 type;
@@ -2030,6 +2052,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
 	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
 		cancel_delayed_work_sync(&rdev->worker);
 
+	bnxt_re_free_gid_ctx(rdev);
 	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
 			       &rdev->flags))
 		bnxt_re_cleanup_res(rdev);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index dfe3177123e5..ee36b3d82cc0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -705,9 +705,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 	srq->dbinfo.db = srq->dpi->dbr;
 	srq->dbinfo.max_slot = 1;
 	srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
-	if (srq->threshold)
-		bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
-	srq->arm_req = false;
+	bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
 
 	return 0;
 fail:
@@ -717,24 +715,6 @@ fail:
 	return rc;
 }
 
-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
-			  struct bnxt_qplib_srq *srq)
-{
-	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
-	u32 count;
-
-	count = __bnxt_qplib_get_avail(srq_hwq);
-	if (count > srq->threshold) {
-		srq->arm_req = false;
-		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
-	} else {
-		/* Deferred arming */
-		srq->arm_req = true;
-	}
-
-	return 0;
-}
-
 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
 			 struct bnxt_qplib_srq *srq)
 {
@@ -776,7 +756,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
 	struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
 	struct rq_wqe *srqe;
 	struct sq_sge *hw_sge;
-	u32 count = 0;
 	int i, next;
 
 	spin_lock(&srq_hwq->lock);
@@ -808,15 +787,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
 
 	bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
 
-	spin_lock(&srq_hwq->lock);
-	count = __bnxt_qplib_get_avail(srq_hwq);
-	spin_unlock(&srq_hwq->lock);
 	/* Ring DB */
 	bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
-	if (srq->arm_req == true && count > srq->threshold) {
-		srq->arm_req = false;
-		bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
-	}
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index ab125f1d949e..4921a214c34c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -546,8 +546,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
 			 srqn_handler_t srq_handler);
 int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
 			  struct bnxt_qplib_srq *srq);
-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
-			  struct bnxt_qplib_srq *srq);
 int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
 			 struct bnxt_qplib_srq *srq);
 void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 6cd05207ffed..cc5c82d96839 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
 	pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
 	if (!pbl->pg_arr)
 		return -ENOMEM;
+	memset(pbl->pg_arr, 0, pages * sizeof(void *));
 
 	pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
 	if (!pbl->pg_map_arr) {
@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
 		pbl->pg_arr = NULL;
 		return -ENOMEM;
 	}
+	memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
 
 	pbl->pg_count = 0;
 	pbl->pg_size = sginfo->pgsize;
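The qplib_res.c hunks above add explicit memset() calls because vmalloc_array(), unlike kcalloc() or vcalloc(), returns uninitialized memory, while the surrounding PBL code expects unused slots to read as NULL/0. A short sketch of the pattern, using a hypothetical helper rather than driver code:

/*
 * Illustrative sketch: vmalloc_array() does not zero the buffer, so
 * callers that depend on NULL entries must clear it themselves.
 */
#include <linux/string.h>
#include <linux/vmalloc.h>

static void **example_alloc_page_table(size_t pages)
{
        void **arr = vmalloc_array(pages, sizeof(void *));

        if (!arr)
                return NULL;
        memset(arr, 0, pages * sizeof(void *));  /* make unused slots NULL */
        return arr;
}

vcalloc() would fold the allocation and the zeroing into a single call; the patch keeps vmalloc_array() plus memset() instead.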
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 94c211df09d8..fdeec33c71da 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -994,6 +994,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
 		old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
 		if (xa_is_err(old_entry))
 			ret = xa_err(old_entry);
+		else
+			qp->ibqp.qp_num = 1;
 	} else {
 		ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
 				      XA_LIMIT(1, dev->attrs.max_qp - 1),
@@ -1031,7 +1033,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
 		if (ret)
 			goto err_out_cmd;
 	} else {
-		init_kernel_qp(dev, qp, attrs);
+		ret = init_kernel_qp(dev, qp, attrs);
+		if (ret)
+			goto err_out_xa;
 	}
 
 	qp->attrs.max_send_sge = attrs->cap.max_send_sge;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 64bca08f3f1a..f82bdd46a917 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3043,7 +3043,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
 	if (!hr_dev->is_vf)
 		hns_roce_free_link_table(hr_dev);
 
-	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
+	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
 		free_dip_entry(hr_dev);
 }
 
@@ -5476,7 +5476,7 @@ out:
 	return ret;
 }
 
-static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
+static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn,
 				  void *buffer)
 {
 	struct hns_roce_v2_scc_context *context;
@@ -5488,7 +5488,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
 		return PTR_ERR(mailbox);
 
 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
-				qpn);
+				sccn);
 	if (ret)
 		goto out;
 
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index f637b73b946e..230187dda6a0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
 		struct hns_roce_v2_qp_context qpc;
 		struct hns_roce_v2_scc_context sccc;
 	} context = {};
+	u32 sccn = hr_qp->qpn;
 	int ret;
 
 	if (!hr_dev->hw->query_qpc)
@@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
 	    !hr_dev->hw->query_sccc)
 		goto out;
 
-	ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
+	if (hr_qp->cong_type == CONG_TYPE_DIP) {
+		if (!hr_qp->dip)
+			goto out;
+		sccn = hr_qp->dip->dip_idx;
+	}
+
+	ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
 	if (ret)
 		ibdev_warn_ratelimited(&hr_dev->ib_dev,
 				       "failed to query SCCC, ret = %d.\n",
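The erdma_verbs.c hunks above stop discarding the return value of init_kernel_qp() and unwind through err_out_xa on failure, and record qp_num for the reserved QP 1 case. A sketch of that bug class with hypothetical names (not erdma code): annotating an init helper with __must_check makes the compiler warn about callers that drop its result.

/*
 * Illustrative sketch, hypothetical helper: __must_check turns a
 * silently ignored initialization failure into a compile-time warning.
 */
#include <linux/compiler.h>
#include <linux/errno.h>

static int __must_check example_init_qp(void *dev, void *qp)
{
        if (!dev || !qp)
                return -EINVAL;
        /* ... real initialization would go here ... */
        return 0;
}

static int example_create_qp(void *dev, void *qp)
{
        int ret = example_init_qp(dev, qp);

        if (ret)
                return ret;     /* propagate instead of dropping the error */
        return 0;
}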
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 132a87e52d5c..ac0183a2ff7a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -345,33 +345,15 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
 
 static void rxe_skb_tx_dtor(struct sk_buff *skb)
 {
-	struct net_device *ndev = skb->dev;
-	struct rxe_dev *rxe;
-	unsigned int qp_index;
-	struct rxe_qp *qp;
+	struct rxe_qp *qp = skb->sk->sk_user_data;
 	int skb_out;
 
-	rxe = rxe_get_dev_from_net(ndev);
-	if (!rxe && is_vlan_dev(ndev))
-		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
-	if (WARN_ON(!rxe))
-		return;
-
-	qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
-	if (!qp_index)
-		return;
-
-	qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
-	if (!qp)
-		goto put_dev;
-
 	skb_out = atomic_dec_return(&qp->skb_out);
-	if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
+	if (unlikely(qp->need_req_skb &&
+		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
 		rxe_sched_task(&qp->send_task);
 
 	rxe_put(qp);
-put_dev:
-	ib_device_put(&rxe->ib_dev);
 	sock_put(skb->sk);
 }
 
@@ -383,6 +365,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
 	sock_hold(sk);
 	skb->sk = sk;
 	skb->destructor = rxe_skb_tx_dtor;
+	rxe_get(pkt->qp);
 	atomic_inc(&pkt->qp->skb_out);
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -405,6 +388,7 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
 	sock_hold(sk);
 	skb->sk = sk;
 	skb->destructor = rxe_skb_tx_dtor;
+	rxe_get(pkt->qp);
 	atomic_inc(&pkt->qp->skb_out);
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -497,6 +481,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 		goto out;
 	}
 
+	/* Add time stamp to skb. */
+	skb->tstamp = ktime_get();
+
 	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
 
 	/* FIXME: hold reference to this netdev until life of this skb. */
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index f2af3e0aef35..95f1c1c2949d 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
 	if (err < 0)
 		return err;
-	qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
+	qp->sk->sk->sk_user_data = qp;
 
 	/* pick a source UDP port number for this QP based on
 	 * the source QPN. this spreads traffic for different QPs
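The rxe_net.c hunks above store the QP pointer itself in sk_user_data and take a reference (rxe_get(pkt->qp)) for every skb handed to the stack, so rxe_skb_tx_dtor() can dereference the QP directly instead of re-looking it up by index; the rxe_qp.c hunk sets sk_user_data accordingly. The same lifetime rule in miniature, with hypothetical types rather than rxe code:

/*
 * Illustrative sketch: take a reference before handing an object to an
 * asynchronous callback and drop it when the callback runs, so the
 * callback can safely dereference the pointer.
 */
#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
        struct kref ref;
};

static void example_obj_release(struct kref *ref)
{
        kfree(container_of(ref, struct example_obj, ref));
}

/* runs later, possibly after the submitter has dropped its own reference */
static void example_async_done(struct example_obj *obj)
{
        kref_put(&obj->ref, example_obj_release);       /* pairs with kref_get() */
}

static void example_submit_async(struct example_obj *obj)
{
        kref_get(&obj->ref);    /* keep obj alive until example_async_done() */
        /* ... hand obj to the asynchronous context here ... */
}

In the patch, the added rxe_get(pkt->qp) in rxe_send() and rxe_loopback() pairs with the existing rxe_put(qp) in the destructor.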