author    Leon Romanovsky <leonro@mellanox.com>  2018-07-10 13:31:48 +0300
committer Jason Gunthorpe <jgg@mellanox.com>     2018-07-13 21:09:57 +0300
commit    40ddacf2dda952e0f33b40d850bf5f7403bdbe0f (patch)
tree      fa48e9042a67efda006a5da0266d40117740db46 /drivers/infiniband/core
parent    6869e0004fe16184acd6488f0c637e0081a84a8a (diff)
RDMA/umem: Don't hold mmap_sem for too long
DMA mapping is a time-consuming operation and doesn't need to be performed while the mmap_sem semaphore is held. The semaphore only needs to be held for accounting and get_user_pages related activities.

Signed-off-by: Huy Nguyen <huyn@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
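The locking pattern is easier to see outside the diff context. Below is a minimal sketch of the resulting flow, not the literal patched ib_umem_get(): the helper name pin_pages_sketch and its parameters are illustrative only, and the sg-table assembly and page release done by the real function are omitted. It only shows where the write lock, the read lock, and no lock at all are used, assuming the ~4.18-era APIs visible in the diff (get_user_pages_longterm(), rlimit(), pinned_vm).

/*
 * Sketch of the reworked flow: write lock only for pinned_vm accounting,
 * read lock only around the pinning loop, no mmap_sem during DMA mapping.
 * Helper name/parameters are illustrative; sg assembly and page release
 * are omitted for brevity.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>

static int pin_pages_sketch(unsigned long cur_base, unsigned long npages,
			    unsigned int gup_flags, struct page **page_list)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long remaining = npages;
	int ret;

	/* Charge pinned_vm up front; only the accounting needs the write lock. */
	down_write(&current->mm->mmap_sem);
	current->mm->pinned_vm += npages;
	if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
		up_write(&current->mm->mmap_sem);
		ret = -ENOMEM;
		goto undo;
	}
	up_write(&current->mm->mmap_sem);

	/* The pinning loop itself only needs the read lock. */
	down_read(&current->mm->mmap_sem);
	while (remaining) {
		ret = get_user_pages_longterm(cur_base,
					      min_t(unsigned long, remaining,
						    PAGE_SIZE / sizeof(struct page *)),
					      gup_flags, page_list, NULL);
		if (ret < 0) {
			up_read(&current->mm->mmap_sem);
			goto undo;
		}
		cur_base  += ret * PAGE_SIZE;
		remaining -= ret;
	}
	up_read(&current->mm->mmap_sem);

	/*
	 * DMA mapping (ib_dma_map_sg_attrs() in the real code) runs here,
	 * with mmap_sem not held at all.
	 */
	return 0;

undo:
	/* Roll back the speculative accounting, as the out: path does. */
	down_write(&current->mm->mmap_sem);
	current->mm->pinned_vm -= npages;
	up_write(&current->mm->mmap_sem);
	return ret;
}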
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/umem.c | 24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 498f59bb4989..abe9924baf7c 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -84,7 +84,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct ib_umem *umem;
struct page **page_list;
struct vm_area_struct **vma_list;
- unsigned long locked;
unsigned long lock_limit;
unsigned long cur_base;
unsigned long npages;
@@ -149,15 +148,16 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
npages = ib_umem_num_pages(umem);
- down_write(&current->mm->mmap_sem);
-
- locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm += npages;
+ if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
+ up_write(&current->mm->mmap_sem);
ret = -ENOMEM;
goto out;
}
+ up_write(&current->mm->mmap_sem);
cur_base = addr & PAGE_MASK;
@@ -176,14 +176,16 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
need_release = 1;
sg_list_start = umem->sg_head.sgl;
+ down_read(&current->mm->mmap_sem);
while (npages) {
ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
-
- if (ret < 0)
+ if (ret < 0) {
+ up_read(&current->mm->mmap_sem);
goto out;
+ }
umem->npages += ret;
cur_base += ret * PAGE_SIZE;
@@ -199,6 +201,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
/* preparing for next loop */
sg_list_start = sg;
}
+ up_read(&current->mm->mmap_sem);
umem->nmap = ib_dma_map_sg_attrs(context->device,
umem->sg_head.sgl,
@@ -215,13 +218,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
out:
if (ret < 0) {
+ down_write(&current->mm->mmap_sem);
+ current->mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&current->mm->mmap_sem);
if (need_release)
__ib_umem_release(context->device, umem, 0);
kfree(umem);
- } else
- current->mm->pinned_vm = locked;
+ }
- up_write(&current->mm->mmap_sem);
if (vma_list)
free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
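Design note on the error path: the old code assigned current->mm->pinned_vm = locked only on success, which is why mmap_sem had to stay held for write across the entire pin-and-map sequence. The reworked code charges pinned_vm speculatively before pinning and, when ret < 0, takes a short write-lock section in the out: path to subtract ib_umem_num_pages(umem) again, so the accounting stays correct without the long-lived write lock.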