author		David Rientjes <rientjes@google.com>	2012-10-09 03:34:06 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 11:23:03 +0400
commit		8449d21fb49e9824e2736c5febd6b5d287cd2ba1 (patch)
tree		8685691f2e75ab7ceeacb05c661fa151270c3ee8
parent		b676b293fb48672904ee1b9828cb50b4eed01717 (diff)
download	linux-8449d21fb49e9824e2736c5febd6b5d287cd2ba1.tar.xz
mm, thp: fix mlock statistics
NR_MLOCK is only accounted in single page units: there's no logic to
handle transparent hugepages.  This patch adjusts the statistics by the
appropriate number of pages so that the correct amount of memory is
reflected.

Currently:

	$ grep Mlocked /proc/meminfo
	Mlocked:           19636 kB

	#define MAP_SIZE	(4UL << 30)	/* 4GB */

	void *ptr = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
	mlock(ptr, MAP_SIZE);

	$ grep Mlocked /proc/meminfo
	Mlocked:           29844 kB

	munlock(ptr, MAP_SIZE);

	$ grep Mlocked /proc/meminfo
	Mlocked:           19636 kB

And with this patch:

	$ grep Mlocked /proc/meminfo
	Mlocked:           19636 kB

	mlock(ptr, MAP_SIZE);

	$ grep Mlocked /proc/meminfo
	Mlocked:         4213664 kB

	munlock(ptr, MAP_SIZE);

	$ grep Mlocked /proc/meminfo
	Mlocked:           19636 kB

Signed-off-by: David Rientjes <rientjes@google.com>
Reported-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
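The test snippet in the changelog is abbreviated; the following is a
self-contained version of the same test (a sketch, not part of the
commit: the show_mlocked() helper and the error handling are my
additions, and it assumes a 64-bit Linux system with transparent
hugepages enabled and an RLIMIT_MEMLOCK large enough to lock 4GB,
e.g. running as root):

	/* mlock-test.c: hypothetical standalone reproducer based on the
	 * changelog snippet above. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/mman.h>

	#define MAP_SIZE	(4UL << 30)	/* 4GB; UL avoids int shift overflow */

	/* Print the Mlocked: line from /proc/meminfo. */
	static void show_mlocked(const char *when)
	{
		char line[128];
		FILE *f = fopen("/proc/meminfo", "r");

		if (!f) {
			perror("fopen");
			exit(EXIT_FAILURE);
		}
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "Mlocked:", 8))
				printf("%s: %s", when, line);
		fclose(f);
	}

	int main(void)
	{
		void *ptr = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (ptr == MAP_FAILED) {
			perror("mmap");
			return EXIT_FAILURE;
		}
		show_mlocked("before mlock ");
		if (mlock(ptr, MAP_SIZE)) {	/* faults in and locks all 4GB */
			perror("mlock");
			return EXIT_FAILURE;
		}
		show_mlocked("after mlock  ");
		if (munlock(ptr, MAP_SIZE)) {
			perror("munlock");
			return EXIT_FAILURE;
		}
		show_mlocked("after munlock");
		munmap(ptr, MAP_SIZE);
		return EXIT_SUCCESS;
	}

Build with something like gcc -O2 -o mlock-test mlock-test.c and run it
before and after applying the patch: on an unpatched kernel the "after
mlock" figure rises by only ~10 MB; with the patch it rises by ~4 GB.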
-rw-r--r--	mm/internal.h	3
-rw-r--r--	mm/mlock.c	9
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index f5f295fe11e1..a4fa284f6bc2 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -180,7 +180,8 @@ static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
 		return 0;
 
 	if (!TestSetPageMlocked(page)) {
-		inc_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 	}
 	return 1;
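For context, hpage_nr_pages() returns the number of base pages backing
the page: 1 for a regular page, HPAGE_PMD_NR for a transparent hugepage
head (512 on x86-64, i.e. 2MB / 4kB). That is what lets a single
mod_zone_page_state() call account the whole hugepage. Its definition
at the time, in include/linux/huge_mm.h, was essentially:

	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;	/* base pages per PMD-sized THP */
		return 1;
	}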
diff --git a/mm/mlock.c b/mm/mlock.c
index de7321592897..f0b9ce572fc7 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -56,7 +56,8 @@ void clear_page_mlock(struct page *page)
 	if (!TestClearPageMlocked(page))
 		return;
 
-	dec_zone_page_state(page, NR_MLOCK);
+	mod_zone_page_state(page_zone(page), NR_MLOCK,
+			    -hpage_nr_pages(page));
 	count_vm_event(UNEVICTABLE_PGCLEARED);
 	if (!isolate_lru_page(page)) {
 		putback_lru_page(page);
@@ -78,7 +79,8 @@ void mlock_vma_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (!TestSetPageMlocked(page)) {
-		inc_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    hpage_nr_pages(page));
 		count_vm_event(UNEVICTABLE_PGMLOCKED);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
@@ -105,7 +107,8 @@ void munlock_vma_page(struct page *page)
 	BUG_ON(!PageLocked(page));
 
 	if (TestClearPageMlocked(page)) {
-		dec_zone_page_state(page, NR_MLOCK);
+		mod_zone_page_state(page_zone(page), NR_MLOCK,
+				    -hpage_nr_pages(page));
 		if (!isolate_lru_page(page)) {
 			int ret = SWAP_AGAIN;
 
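As a sanity check on the changelog numbers: with the patch, mlocking the
4GB mapping moves Mlocked from 19636 kB to 4213664 kB, a delta of
4194028 kB, within about 0.01% of the expected 4194304 kB (4 GB). Before
the patch the delta was only 29844 - 19636 = 10208 kB, consistent with
each 2MB hugepage being accounted as a single 4kB page (2048 hugepages
x 4 kB = 8192 kB, plus roughly 2 MB worth of the range that was
presumably backed by ordinary 4kB pages and counted correctly).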