author     Mauricio Lin <mauriciolin@gmail.com>     2005-09-04 02:55:10 +0400
committer  Linus Torvalds <torvalds@evo.osdl.org>  2005-09-05 11:05:49 +0400
commit     e070ad49f31155d872d8e96cab2142840993e3c0 (patch)
tree       16d5bfd3d7627d6616c6b1008fac80e4cf77379e /fs/proc/task_mmu.c
parent     00e145b6d59a16dd7740197a18f7abdb3af004a9 (diff)
[PATCH] add /proc/pid/smaps
Add a "smaps" entry to /proc/pid: show howmuch memory is resident in each mapping. People that want to perform a memory consumption analysing can use it mainly if someone needs to figure out which libraries can be reduced for embedded systems. So the new features are the physical size of shared and clean [or dirty]; private and clean [or dirty]. Take a look the example below: # cat /proc/4576/smaps 08048000-080dc000 r-xp /bin/bash Size: 592 KB Rss: 500 KB Shared_Clean: 500 KB Shared_Dirty: 0 KB Private_Clean: 0 KB Private_Dirty: 0 KB 080dc000-080e2000 rw-p /bin/bash Size: 24 KB Rss: 24 KB Shared_Clean: 0 KB Shared_Dirty: 0 KB Private_Clean: 0 KB Private_Dirty: 24 KB 080e2000-08116000 rw-p Size: 208 KB Rss: 208 KB Shared_Clean: 0 KB Shared_Dirty: 0 KB Private_Clean: 0 KB Private_Dirty: 208 KB b7e2b000-b7e34000 r-xp /lib/tls/libnss_files-2.3.2.so Size: 36 KB Rss: 12 KB Shared_Clean: 12 KB Shared_Dirty: 0 KB Private_Clean: 0 KB Private_Dirty: 0 KB ... (Includes a cleanup from "Richard Purdie" <rpurdie@rpsys.net>) From: Torsten Foertsch <torsten.foertsch@gmx.net> show_smap calls first show_map and then prints its additional information to the seq_file. show_map checks if all it has to print fits into the buffer and if yes marks the current vma as written. While that is correct for show_map it is not for show_smap. Here the vma should be marked as written only after the additional information is also written. The attached patch cures the problem. It moves the functionality of the show_map function to a new function show_map_internal that is called with an additional struct mem_size_stats* argument. Then show_map calls show_map_internal with NULL as struct mem_size_stats* whereas show_smap calls it with a real pointer. Now the final if (m->count < m->size) /* vma is copied successfully */ m->version = (vma != get_gate_vma(task))? vma->vm_start: 0; is done only if the whole entry fits into the buffer. Signed-off-by: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--  fs/proc/task_mmu.c | 225
1 file changed, 183 insertions(+), 42 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 64e84cadfa3c..c7ef3e48e35b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -2,10 +2,13 @@
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
+#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
+
#include <asm/elf.h>
#include <asm/uaccess.h>
+#include <asm/tlbflush.h>
#include "internal.h"
char *task_mem(struct mm_struct *mm, char *buffer)
@@ -89,49 +92,58 @@ static void pad_len_spaces(struct seq_file *m, int len)
seq_printf(m, "%*c", len, ' ');
}
-static int show_map(struct seq_file *m, void *v)
+struct mem_size_stats
+{
+ unsigned long resident;
+ unsigned long shared_clean;
+ unsigned long shared_dirty;
+ unsigned long private_clean;
+ unsigned long private_dirty;
+};
+
+static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
struct task_struct *task = m->private;
- struct vm_area_struct *map = v;
- struct mm_struct *mm = map->vm_mm;
- struct file *file = map->vm_file;
- int flags = map->vm_flags;
+ struct vm_area_struct *vma = v;
+ struct mm_struct *mm = vma->vm_mm;
+ struct file *file = vma->vm_file;
+ int flags = vma->vm_flags;
unsigned long ino = 0;
dev_t dev = 0;
int len;
if (file) {
- struct inode *inode = map->vm_file->f_dentry->d_inode;
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
dev = inode->i_sb->s_dev;
ino = inode->i_ino;
}
seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
- map->vm_start,
- map->vm_end,
+ vma->vm_start,
+ vma->vm_end,
flags & VM_READ ? 'r' : '-',
flags & VM_WRITE ? 'w' : '-',
flags & VM_EXEC ? 'x' : '-',
flags & VM_MAYSHARE ? 's' : 'p',
- map->vm_pgoff << PAGE_SHIFT,
+ vma->vm_pgoff << PAGE_SHIFT,
MAJOR(dev), MINOR(dev), ino, &len);
/*
* Print the dentry name for named mappings, and a
* special [heap] marker for the heap:
*/
- if (map->vm_file) {
+ if (file) {
pad_len_spaces(m, len);
- seq_path(m, file->f_vfsmnt, file->f_dentry, "");
+ seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
} else {
if (mm) {
- if (map->vm_start <= mm->start_brk &&
- map->vm_end >= mm->brk) {
+ if (vma->vm_start <= mm->start_brk &&
+ vma->vm_end >= mm->brk) {
pad_len_spaces(m, len);
seq_puts(m, "[heap]");
} else {
- if (map->vm_start <= mm->start_stack &&
- map->vm_end >= mm->start_stack) {
+ if (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack) {
pad_len_spaces(m, len);
seq_puts(m, "[stack]");
@@ -143,24 +155,146 @@ static int show_map(struct seq_file *m, void *v)
}
}
seq_putc(m, '\n');
- if (m->count < m->size) /* map is copied successfully */
- m->version = (map != get_gate_vma(task))? map->vm_start: 0;
+
+ if (mss)
+ seq_printf(m,
+ "Size: %8lu kB\n"
+ "Rss: %8lu kB\n"
+ "Shared_Clean: %8lu kB\n"
+ "Shared_Dirty: %8lu kB\n"
+ "Private_Clean: %8lu kB\n"
+ "Private_Dirty: %8lu kB\n",
+ (vma->vm_end - vma->vm_start) >> 10,
+ mss->resident >> 10,
+ mss->shared_clean >> 10,
+ mss->shared_dirty >> 10,
+ mss->private_clean >> 10,
+ mss->private_dirty >> 10);
+
+ if (m->count < m->size) /* vma is copied successfully */
+ m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
return 0;
}
+static int show_map(struct seq_file *m, void *v)
+{
+ return show_map_internal(m, v, 0);
+}
+
+static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ struct mem_size_stats *mss)
+{
+ pte_t *pte, ptent;
+ unsigned long pfn;
+ struct page *page;
+
+ pte = pte_offset_map(pmd, addr);
+ do {
+ ptent = *pte;
+ if (pte_none(ptent) || !pte_present(ptent))
+ continue;
+
+ mss->resident += PAGE_SIZE;
+ pfn = pte_pfn(ptent);
+ if (!pfn_valid(pfn))
+ continue;
+
+ page = pfn_to_page(pfn);
+ if (page_count(page) >= 2) {
+ if (pte_dirty(ptent))
+ mss->shared_dirty += PAGE_SIZE;
+ else
+ mss->shared_clean += PAGE_SIZE;
+ } else {
+ if (pte_dirty(ptent))
+ mss->private_dirty += PAGE_SIZE;
+ else
+ mss->private_clean += PAGE_SIZE;
+ }
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ pte_unmap(pte - 1);
+ cond_resched_lock(&vma->vm_mm->page_table_lock);
+}
+
+static inline void smaps_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+ unsigned long addr, unsigned long end,
+ struct mem_size_stats *mss)
+{
+ pmd_t *pmd;
+ unsigned long next;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ smaps_pte_range(vma, pmd, addr, next, mss);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static inline void smaps_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+ unsigned long addr, unsigned long end,
+ struct mem_size_stats *mss)
+{
+ pud_t *pud;
+ unsigned long next;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ smaps_pmd_range(vma, pud, addr, next, mss);
+ } while (pud++, addr = next, addr != end);
+}
+
+static inline void smaps_pgd_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end,
+ struct mem_size_stats *mss)
+{
+ pgd_t *pgd;
+ unsigned long next;
+
+ pgd = pgd_offset(vma->vm_mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ smaps_pud_range(vma, pgd, addr, next, mss);
+ } while (pgd++, addr = next, addr != end);
+}
+
+static int show_smap(struct seq_file *m, void *v)
+{
+ struct vm_area_struct *vma = v;
+ struct mm_struct *mm = vma->vm_mm;
+ struct mem_size_stats mss;
+
+ memset(&mss, 0, sizeof mss);
+
+ if (mm) {
+ spin_lock(&mm->page_table_lock);
+ smaps_pgd_range(vma, vma->vm_start, vma->vm_end, &mss);
+ spin_unlock(&mm->page_table_lock);
+ }
+
+ return show_map_internal(m, v, &mss);
+}
+
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct task_struct *task = m->private;
unsigned long last_addr = m->version;
struct mm_struct *mm;
- struct vm_area_struct *map, *tail_map;
+ struct vm_area_struct *vma, *tail_vma;
loff_t l = *pos;
/*
* We remember last_addr rather than next_addr to hit with
* mmap_cache most of the time. We have zero last_addr at
- * the begining and also after lseek. We will have -1 last_addr
- * after the end of the maps.
+ * the beginning and also after lseek. We will have -1 last_addr
+ * after the end of the vmas.
*/
if (last_addr == -1UL)
@@ -170,47 +304,47 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (!mm)
return NULL;
- tail_map = get_gate_vma(task);
+ tail_vma = get_gate_vma(task);
down_read(&mm->mmap_sem);
/* Start with last addr hint */
- if (last_addr && (map = find_vma(mm, last_addr))) {
- map = map->vm_next;
+ if (last_addr && (vma = find_vma(mm, last_addr))) {
+ vma = vma->vm_next;
goto out;
}
/*
- * Check the map index is within the range and do
+ * Check the vma index is within the range and do
* sequential scan until m_index.
*/
- map = NULL;
+ vma = NULL;
if ((unsigned long)l < mm->map_count) {
- map = mm->mmap;
- while (l-- && map)
- map = map->vm_next;
+ vma = mm->mmap;
+ while (l-- && vma)
+ vma = vma->vm_next;
goto out;
}
if (l != mm->map_count)
- tail_map = NULL; /* After gate map */
+ tail_vma = NULL; /* After gate vma */
out:
- if (map)
- return map;
+ if (vma)
+ return vma;
- /* End of maps has reached */
- m->version = (tail_map != NULL)? 0: -1UL;
+ /* End of vmas has been reached */
+ m->version = (tail_vma != NULL)? 0: -1UL;
up_read(&mm->mmap_sem);
mmput(mm);
- return tail_map;
+ return tail_vma;
}
static void m_stop(struct seq_file *m, void *v)
{
struct task_struct *task = m->private;
- struct vm_area_struct *map = v;
- if (map && map != get_gate_vma(task)) {
- struct mm_struct *mm = map->vm_mm;
+ struct vm_area_struct *vma = v;
+ if (vma && vma != get_gate_vma(task)) {
+ struct mm_struct *mm = vma->vm_mm;
up_read(&mm->mmap_sem);
mmput(mm);
}
@@ -219,14 +353,14 @@ static void m_stop(struct seq_file *m, void *v)
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct task_struct *task = m->private;
- struct vm_area_struct *map = v;
- struct vm_area_struct *tail_map = get_gate_vma(task);
+ struct vm_area_struct *vma = v;
+ struct vm_area_struct *tail_vma = get_gate_vma(task);
(*pos)++;
- if (map && (map != tail_map) && map->vm_next)
- return map->vm_next;
+ if (vma && (vma != tail_vma) && vma->vm_next)
+ return vma->vm_next;
m_stop(m, v);
- return (map != tail_map)? tail_map: NULL;
+ return (vma != tail_vma)? tail_vma: NULL;
}
struct seq_operations proc_pid_maps_op = {
@@ -236,6 +370,13 @@ struct seq_operations proc_pid_maps_op = {
.show = show_map
};
+struct seq_operations proc_pid_smaps_op = {
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = show_smap
+};
+
#ifdef CONFIG_NUMA
struct numa_maps {