author     Ingo Molnar <mingo@elte.hu>    2008-11-20 11:02:39 +0300
committer  Ingo Molnar <mingo@elte.hu>    2008-11-20 11:02:39 +0300
commit     fbc2a06056c9aa3cb8c44bf1cfeb1d260e229e5c
tree       feb2a1c13ad3dff5a8c7ab3c0265e8eca7a0c5a3 /mm/mlock.c
parent     a3d732f93785da17e0137210deadb4616f5536fc
parent     ee2f6cc7f9ea2542ad46070ed62ba7aa04d08871
download   linux-fbc2a06056c9aa3cb8c44bf1cfeb1d260e229e5c.tar.xz
Merge branch 'linus' into x86/uv
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--   mm/mlock.c   18
1 file changed, 7 insertions, 11 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index 008ea70b7afa..1ada366570cb 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -66,14 +66,10 @@ void __clear_page_mlock(struct page *page)
 		putback_lru_page(page);
 	} else {
 		/*
-		 * Page not on the LRU yet. Flush all pagevecs and retry.
+		 * We lost the race. the page already moved to evictable list.
 		 */
-		lru_add_drain_all();
-		if (!isolate_lru_page(page))
-			putback_lru_page(page);
-		else if (PageUnevictable(page))
+		if (PageUnevictable(page))
 			count_vm_event(UNEVICTABLE_PGSTRANDED);
-
 	}
 }
 
@@ -166,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	unsigned long addr = start;
 	struct page *pages[16]; /* 16 gives a reasonable batch */
 	int nr_pages = (end - start) / PAGE_SIZE;
-	int ret;
+	int ret = 0;
 	int gup_flags = 0;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
@@ -187,8 +183,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (vma->vm_flags & VM_WRITE)
 		gup_flags |= GUP_FLAGS_WRITE;
 
-	lru_add_drain_all();	/* push cached pages to LRU */
-
 	while (nr_pages > 0) {
 		int i;
 
@@ -251,8 +245,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 		ret = 0;
 	}
 
-	lru_add_drain_all();	/* to update stats */
-
 	return ret;	/* count entire vma as locked_vm */
 }
 
@@ -546,6 +538,8 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
 	if (!can_do_mlock())
 		return -EPERM;
 
+	lru_add_drain_all();	/* flush pagevec */
+
 	down_write(&current->mm->mmap_sem);
 	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
 	start &= PAGE_MASK;
@@ -612,6 +606,8 @@ asmlinkage long sys_mlockall(int flags)
 	if (!can_do_mlock())
 		goto out;
 
+	lru_add_drain_all();	/* flush pagevec */
+
 	down_write(&current->mm->mmap_sem);
 
 	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
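In effect, the change drops the lru_add_drain_all() calls (and the isolate/putback retry) from the per-page and per-VMA-range paths and instead drains the per-CPU pagevecs once, at the entry of sys_mlock() and sys_mlockall(). Purely as an illustration of the two syscalls whose entry points are touched here, a minimal userspace sketch follows; it is not part of the patch, and the buffer size and flags are arbitrary.

/*
 * Illustrative userspace sketch (not from the patch): exercises the two
 * syscalls whose kernel entry points now do a single lru_add_drain_all()
 * up front instead of draining inside the per-VMA loop.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = malloc(len);

	if (!buf)
		return 1;
	memset(buf, 0, len);		/* touch the pages */

	if (mlock(buf, len) != 0)	/* enters sys_mlock() */
		perror("mlock");
	else
		munlock(buf, len);

	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)	/* enters sys_mlockall() */
		perror("mlockall");
	else
		munlockall();

	free(buf);
	return 0;
}

For an unprivileged process both calls may fail with EPERM or ENOMEM under the default RLIMIT_MEMLOCK, which corresponds to the can_do_mlock() and rlim[RLIMIT_MEMLOCK] checks visible in the hunks above.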