author    Jack Morgenstein <jackm@dev.mellanox.co.il>  2009-05-08 08:35:13 +0400
committer Roland Dreier <rolandd@cisco.com>  2009-05-08 08:35:13 +0400
commit    2b6b7d4be487bada8c727df829c25068c7b5e5a3 (patch)
tree      d1fa04daa97cafe89f04b9f16c8a58925f364203 /drivers/infiniband/hw/mlx4/mr.c
parent    56a50adda49b2020156616c4eb15353e0f9ad7de (diff)
IB/mlx4: Don't overwrite fast registration page list when posting work request
When posting a fast register work request, the low-level mlx4 driver converted the page-list addresses to big-endian in place and set a "present" bit in each entry. This caused problems later, when the consumer tried to unmap the pages using that same page list and assumed the addresses were still in CPU-endian order. Fix the mlx4 driver to allocate two buffers and keep the hardware-format bus addresses in a private buffer.

This fixes <https://bugs.openfabrics.org/show_bug.cgi?id=1571>, an NFS/RDMA server crash. The cause of the crash was found by Vu Pham of Mellanox; the fix is along the lines suggested by Steve Wise in comment #21 of that bug.

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
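The fix keeps two copies of the page list in the driver-private structure: the consumer-visible ibfrpl.page_list stays a plain kmalloc'd array in CPU byte order, while a new mapped_page_list is the DMA-coherent buffer the HCA actually reads. A minimal sketch of that layout, with field names taken from the diff below (the full declaration lives in mlx4_ib.h and may differ slightly):

/* Sketch: driver-private fast-register page list after this patch.
 *   ibfrpl.page_list  - CPU-order addresses the consumer fills in and may
 *                       read back when unmapping; allocated with kmalloc()
 *   mapped_page_list  - hardware-format copy (big-endian, "present" bit),
 *                       allocated with dma_alloc_coherent() at bus address 'map'
 */
struct mlx4_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;
	dma_addr_t			map;
};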
Diffstat (limited to 'drivers/infiniband/hw/mlx4/mr.c')
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 8e4d26d56a95..8f3666b20ea4 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -231,7 +231,11 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
 	if (!mfrpl)
 		return ERR_PTR(-ENOMEM);
 
-	mfrpl->ibfrpl.page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
+	if (!mfrpl->ibfrpl.page_list)
+		goto err_free;
+
+	mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
 						     size, &mfrpl->map,
 						     GFP_KERNEL);
 	if (!mfrpl->ibfrpl.page_list)
@@ -242,6 +246,7 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
 	return &mfrpl->ibfrpl;
 
 err_free:
+	kfree(mfrpl->ibfrpl.page_list);
 	kfree(mfrpl);
 	return ERR_PTR(-ENOMEM);
 }
@@ -252,8 +257,9 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
 	struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
 	int size = page_list->max_page_list_len * sizeof (u64);
 
-	dma_free_coherent(&dev->dev->pdev->dev, size, page_list->page_list,
+	dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
 			  mfrpl->map);
+	kfree(mfrpl->ibfrpl.page_list);
 	kfree(mfrpl);
 }
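The companion change on the post-send path lives in drivers/infiniband/hw/mlx4/qp.c and is outside this mr.c-limited view. Roughly, as a sketch following the commit description rather than a verbatim copy of the upstream hunk (the MLX4_MTT_FLAG_PRESENT name follows the mlx4 driver's convention for the "present" bit), the fast-register WQE builder now writes the hardware format into the private buffer:

/* Build the big-endian, "present"-flagged addresses in mapped_page_list,
 * leaving the consumer's CPU-order ibfrpl.page_list untouched so it can
 * still be used when the pages are unmapped later.
 */
for (i = 0; i < wr->wr.fast_reg.page_list_len; ++i)
	mfrpl->mapped_page_list[i] =
		cpu_to_be64(wr->wr.fast_reg.page_list->page_list[i] |
			    MLX4_MTT_FLAG_PRESENT);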