| author | Rusty Russell <rusty@rustcorp.com.au> | 2008-08-13 02:52:52 +0400 |
|---|---|---|
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2008-08-12 11:52:53 +0400 |
| commit | 912985dce45ef18fcdd9f5439fef054e0e22302a (patch) | |
| tree | 394b3be51134bddef200f86dde48aa3e3c7ae94c /include | |
| parent | 40c42076ebd362dc69210cccea101ac80b6d4bd4 (diff) | |
| download | linux-912985dce45ef18fcdd9f5439fef054e0e22302a.tar.xz | |
mm: Make generic weak get_user_pages_fast and EXPORT_GPL it
Move the get_user_pages_fast fallback implementation out of line, make it a
weak symbol, and get rid of CONFIG_HAVE_GET_USER_PAGES_FAST.
Export the symbol to modules so lguest can use it.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
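
The mm/ side of this change falls outside the 'include'-limited diffstat below, but from the commit message and the macro being removed from mm.h, the generic out-of-line fallback presumably reads roughly as follows. This is a sketch, not the verbatim commit: the file placement and the `__attribute__((weak))` spelling (vs. the kernel's `__weak` shorthand) are assumptions.

```c
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

/*
 * Generic slow-path fallback: take mmap_sem and go through
 * get_user_pages().  Declared weak so that an architecture providing
 * its own get_user_pages_fast() overrides it at link time.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, start, nr_pages,
				write, 0, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
```

Because the fallback is a weak symbol, an architecture with a real fast-GUP implementation simply supplies a strong definition and the linker prefers it over the generic one; no Kconfig switch is needed, which is why CONFIG_HAVE_GET_USER_PAGES_FAST can go away.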
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/mm.h | 20 |
1 file changed, 0 insertions, 20 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 335288bff1b7..fa651609b65d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -834,7 +834,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 			  struct vm_area_struct **pprev, unsigned long start,
 			  unsigned long end, unsigned long newflags);
 
-#ifdef CONFIG_HAVE_GET_USER_PAGES_FAST
 /*
  * get_user_pages_fast provides equivalent functionality to get_user_pages,
  * operating on current and current->mm (force=0 and doesn't return any vmas).
@@ -848,25 +847,6 @@ extern int mprotect_fixup(struct vm_area_struct *vma,
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 
-#else
-/*
- * Should probably be moved to asm-generic, and architectures can include it if
- * they don't implement their own get_user_pages_fast.
- */
-#define get_user_pages_fast(start, nr_pages, write, pages)	\
-({								\
-	struct mm_struct *mm = current->mm;			\
-	int ret;						\
-								\
-	down_read(&mm->mmap_sem);				\
-	ret = get_user_pages(current, mm, start, nr_pages,	\
-					write, 0, pages, NULL);	\
-	up_read(&mm->mmap_sem);					\
-								\
-	ret;							\
-})
-#endif
-
 /*
  * A callback you can register to apply pressure to ageable caches.
  *
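
With the symbol now exported GPL-only, a GPL module such as lguest can call it directly instead of open-coding the mmap_sem dance. A minimal usage sketch; the `pin_user_page` helper and its error policy are illustrative, not from the commit:

```c
#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical helper: pin a single user page for write access. */
static int pin_user_page(unsigned long uaddr, struct page **page)
{
	/* nr_pages = 1, write = 1; returns the number of pages pinned. */
	int ret = get_user_pages_fast(uaddr, 1, 1, page);

	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;
	return 0;	/* caller must put_page(*page) when done */
}
```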