author		David Howells <dhowells@redhat.com>	2006-01-06 11:11:44 +0300
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 19:33:33 +0300
commit		7ee1dd3fee22f15728f545d266403fc977e1eb99 (patch)
tree		e2f9f42b0731d5006fa329a590069be6787af1de /include
parent		5c40f7f373889930d176a515ec375b60a70b5b49 (diff)
download	linux-7ee1dd3fee22f15728f545d266403fc977e1eb99.tar.xz
[PATCH] FRV: Make futex code compilable on nommu [try #2]
Make the futex code compilable and usable on NOMMU by making the attempt to handle page faults conditional on CONFIG_MMU. If this is not enabled, then we can assume that EFAULT returned from futex_atomic_op_inuser() is not recoverable, and that the address lies outside of valid memory.

handle_mm_fault() is made to BUG if called on NOMMU without attempting to invoke the actual handler (__handle_mm_fault).

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
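The kernel/futex.c side of the change is not shown in this include/-limited diff. As a minimal sketch of the approach the message describes (retry the fault through handle_mm_fault() when CONFIG_MMU is set, otherwise treat EFAULT as final), a fault-retry helper in the futex code might look roughly like the following. The name futex_handle_fault and the attempt counter are illustrative rather than taken from the patch, and the caller is assumed to hold mm->mmap_sem for reading:

#ifdef CONFIG_MMU
/* Try to resolve an EFAULT from futex_atomic_op_inuser() by faulting
 * the page in; only worthwhile when there is an MMU to fault against. */
static int futex_handle_fault(unsigned long address, int attempt)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/* Give up after a few attempts, or if no writable VMA covers
	 * the faulting address. */
	if (attempt > 2 || !(vma = find_vma(mm, address)) ||
	    vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
		return -EFAULT;

	switch (handle_mm_fault(mm, vma, address, 1)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		return 0;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		return 0;
	default:
		return -EFAULT;	/* SIGBUS/OOM: not recoverable here */
	}
}
#else
/* With no MMU there is nothing to fault in: an EFAULT means the
 * address simply lies outside valid memory. */
static inline int futex_handle_fault(unsigned long address, int attempt)
{
	return -EFAULT;
}
#endif

The NOMMU branch never calls handle_mm_fault(), which is why the nommu stub added in the diff below can simply BUG(): reaching it on a nommu kernel would indicate a logic error rather than a recoverable fault.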
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm.h	22
1 file changed, 19 insertions(+), 3 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 26f3094911a5..bc01fff3aa01 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -717,12 +717,28 @@ extern int vmtruncate(struct inode * inode, loff_t offset);
extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
-static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
+#ifdef CONFIG_MMU
+extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+ unsigned long address, int write_access);
+
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
{
- return __handle_mm_fault(mm, vma, address, write_access) & (~VM_FAULT_WRITE);
+ return __handle_mm_fault(mm, vma, address, write_access) &
+ (~VM_FAULT_WRITE);
}
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
+{
+ /* should never happen if there's no MMU */
+ BUG();
+ return VM_FAULT_SIGBUS;
+}
+#endif
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);