path: root/drivers/infiniband/hw/mlx5/qp.c
author     Li Dongyang <dongyang.li@anu.edu.au>   2017-08-16 16:31:22 +0300
committer  Doug Ledford <dledford@redhat.com>     2017-08-22 23:48:23 +0300
commit     b588300801f3502a7de5ca897af68019fbb3bc79 (patch)
tree       80edf89cc303d0ed997a5d4589a2cb4af6c2b430 /drivers/infiniband/hw/mlx5/qp.c
parent     7be05753ccc27ce056d45f06a50d150927a88ed7 (diff)
download   linux-b588300801f3502a7de5ca897af68019fbb3bc79.tar.xz
IB/mlx5: use kvmalloc_array for mlx5_ib_wq
We observed multiple times on our Lustre OSS servers that when system memory is fragmented, kmalloc() in create_kernel_qp() can fail order-4/5 allocations even though many free pages remain. Switch to kvmalloc_array(), which can fall back to vmalloc() for such requests, to allow the operation to continue.

Signed-off-by: Li Dongyang <dongyang.li@anu.edu.au>
Acked-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
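For context, a minimal sketch of the allocation pattern this patch adopts is shown below. The struct and function names (example_wq, example_wq_alloc, example_wq_free) are hypothetical and only illustrate the kvmalloc_array()/kvfree() pairing; they are not part of the mlx5 driver.

/*
 * kvmalloc_array() first tries a physically contiguous allocation and,
 * for GFP_KERNEL-compatible requests, falls back to vmalloc() when
 * high-order pages are unavailable, so the request no longer fails just
 * because memory is fragmented.  Memory obtained this way must be
 * released with kvfree(), which handles both backings.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_wq {
	u64 *wrid;
	u32  wqe_cnt;
};

static int example_wq_alloc(struct example_wq *wq)
{
	wq->wrid = kvmalloc_array(wq->wqe_cnt, sizeof(*wq->wrid), GFP_KERNEL);
	if (!wq->wrid)
		return -ENOMEM;
	return 0;
}

static void example_wq_free(struct example_wq *wq)
{
	kvfree(wq->wrid);	/* correct for both kmalloc and vmalloc backing */
}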
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--   drivers/infiniband/hw/mlx5/qp.c   35
1 file changed, 20 insertions, 15 deletions
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5c7ce9bd466e..e098c97e027a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -965,11 +965,16 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 		goto err_free;
 	}
-	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
-	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
-	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
-	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
-	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
+	qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
+				     sizeof(*qp->sq.wrid), GFP_KERNEL);
+	qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
+					sizeof(*qp->sq.wr_data), GFP_KERNEL);
+	qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
+				     sizeof(*qp->rq.wrid), GFP_KERNEL);
+	qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
+				       sizeof(*qp->sq.w_list), GFP_KERNEL);
+	qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
+					 sizeof(*qp->sq.wqe_head), GFP_KERNEL);
 	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
 	    !qp->sq.w_list || !qp->sq.wqe_head) {
@@ -981,11 +986,11 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 	return 0;
 err_wrid:
-	kfree(qp->sq.wqe_head);
-	kfree(qp->sq.w_list);
-	kfree(qp->sq.wrid);
-	kfree(qp->sq.wr_data);
-	kfree(qp->rq.wrid);
+	kvfree(qp->sq.wqe_head);
+	kvfree(qp->sq.w_list);
+	kvfree(qp->sq.wrid);
+	kvfree(qp->sq.wr_data);
+	kvfree(qp->rq.wrid);
 	mlx5_db_free(dev->mdev, &qp->db);
 err_free:
@@ -998,11 +1003,11 @@ err_buf:
 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
-	kfree(qp->sq.wqe_head);
-	kfree(qp->sq.w_list);
-	kfree(qp->sq.wrid);
-	kfree(qp->sq.wr_data);
-	kfree(qp->rq.wrid);
+	kvfree(qp->sq.wqe_head);
+	kvfree(qp->sq.w_list);
+	kvfree(qp->sq.wrid);
+	kvfree(qp->sq.wr_data);
+	kvfree(qp->rq.wrid);
 	mlx5_db_free(dev->mdev, &qp->db);
 	mlx5_buf_free(dev->mdev, &qp->buf);
 }