author	Hugh Dickins <hugh@veritas.com>	2005-11-22 08:32:19 +0300
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-22 20:13:42 +0300
commit	f57e88a8d83de8d844b57e16b84d2f762fe9f092 (patch)
tree	32fee3ab5c177bc0f47827dd18ad4471be3b2a74
parent	ee498ed730283e9cdfc8913f12b90a2246f1a8cc (diff)
download	linux-f57e88a8d83de8d844b57e16b84d2f762fe9f092.tar.xz
[PATCH] unpaged: ZERO_PAGE in VM_UNPAGED
It's strange enough to be looking out for anonymous pages in VM_UNPAGED
areas, let's not insert the ZERO_PAGE there - though whether it would
matter will depend on what we decide about ZERO_PAGE refcounting.

But whereas do_anonymous_page may (exceptionally) be called on a
VM_UNPAGED area, do_no_page should never be: just BUG_ON.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
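For context, a VM_UNPAGED vma is typically created by a driver mmap handler
calling remap_pfn_range(), which fills the whole range with present ptes at
mmap time; that is why such a vma normally never takes a fault, and why
do_anonymous_page is only reached through a hole or a !VM_DONTEXPAND mremap
expansion. A minimal sketch of such a handler, for illustration only (the
mydrv_mmap and mydrv_phys_base names are hypothetical, not part of this
patch):

#include <linux/fs.h>
#include <linux/mm.h>

/* Assumed to be set when the device is probed; purely illustrative. */
static unsigned long mydrv_phys_base;

/*
 * Hypothetical mmap handler: remap_pfn_range() inserts present ptes for
 * the entire range, so the resulting VM_UNPAGED vma should never reach
 * do_no_page, and reaches do_anonymous_page only exceptionally.
 */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       mydrv_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}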
-rw-r--r--	drivers/char/mem.c	2
-rw-r--r--	mm/memory.c	14
2 files changed, 13 insertions, 3 deletions
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 91dd669273e0..29c3b631445a 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -591,7 +591,7 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
 
 		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
 			goto out_up;
-		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
+		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED))
 			break;
 		count = vma->vm_end - addr;
 		if (count > size)
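The path guarded above is the /dev/zero fast path: for a page-aligned read
into a writable private mapping, read_zero_pagealigned() maps the ZERO_PAGE
into the vma instead of copying zeroes, and the hunk makes it break out for
VM_UNPAGED vmas just as it already did for VM_SHARED and VM_HUGETLB ones.
A small userspace sketch of a read that would take this path, for
illustration only:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 16 * 4096;		/* page-aligned length */
	int fd = open("/dev/zero", O_RDONLY);
	/* Writable private mapping: eligible for the zeromap fast path. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (fd < 0 || buf == MAP_FAILED)
		return 1;
	/* Page-aligned buffer and length let read_zero_pagealigned() run. */
	if (read(fd, buf, len) != (ssize_t)len)
		return 1;
	munmap(buf, len);
	close(fd);
	return 0;
}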
diff --git a/mm/memory.c b/mm/memory.c
index 3666a4c6dd22..d1f46f4e4c8a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1812,7 +1812,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (write_access) {
+	/*
+	 * A VM_UNPAGED vma will normally be filled with present ptes
+	 * by remap_pfn_range, and never arrive here; but it might have
+	 * holes, or if !VM_DONTEXPAND, mremap might have expanded it.
+	 * It's weird enough handling anon pages in unpaged vmas, we do
+	 * not want to worry about ZERO_PAGEs too (it may or may not
+	 * matter if their counts wrap): just give them anon pages.
+	 */
+
+	if (write_access || (vma->vm_flags & VM_UNPAGED)) {
 		/* Allocate our own private page. */
 		pte_unmap(page_table);
 
@@ -1887,6 +1896,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int anon = 0;
 
 	pte_unmap(page_table);
+	BUG_ON(vma->vm_flags & VM_UNPAGED);
 
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
@@ -1962,7 +1972,7 @@ retry:
 			inc_mm_counter(mm, anon_rss);
 			lru_cache_add_active(new_page);
 			page_add_anon_rmap(new_page, vma, address);
-		} else if (!(vma->vm_flags & VM_UNPAGED)) {
+		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(new_page);
 		}
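Taken together, the two memory.c hunks leave the fault paths shaped roughly
as follows. This is a condensed illustration, not the kernel source; the
helper names on the right-hand side are invented stand-ins for the real
allocation and mapping code:

/* Condensed, illustrative view of the patched behaviour. */
static int do_anonymous_page_sketch(struct vm_area_struct *vma, int write_access)
{
	/* VM_UNPAGED vmas get private anon pages even on read faults,
	 * so the ZERO_PAGE is never inserted there. */
	if (write_access || (vma->vm_flags & VM_UNPAGED))
		return give_private_anon_page();	/* invented helper */
	return map_zero_page_readonly();		/* invented helper */
}

static int do_no_page_sketch(struct vm_area_struct *vma)
{
	/* do_no_page must never see a VM_UNPAGED vma at all. */
	BUG_ON(vma->vm_flags & VM_UNPAGED);
	return fault_in_backing_page();			/* invented helper */
}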