Diffstat (limited to 'fs/proc/task_mmu.c')
-rw-r--r--  fs/proc/task_mmu.c  | 84
1 file changed, 55 insertions, 29 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 1030a716d155..4540b8f76f16 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -125,7 +125,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
         if (!priv->task)
                 return ERR_PTR(-ESRCH);
 
-        mm = mm_for_maps(priv->task);
+        mm = mm_access(priv->task, PTRACE_MODE_READ);
         if (!mm || IS_ERR(mm))
                 return mm;
         down_read(&mm->mmap_sem);
@@ -393,6 +393,7 @@ struct mem_size_stats {
         unsigned long anonymous;
         unsigned long anonymous_thp;
         unsigned long swap;
+        unsigned long nonlinear;
         u64 pss;
 };
 
@@ -402,24 +403,33 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
 {
         struct mem_size_stats *mss = walk->private;
         struct vm_area_struct *vma = mss->vma;
-        struct page *page;
+        pgoff_t pgoff = linear_page_index(vma, addr);
+        struct page *page = NULL;
         int mapcount;
 
-        if (is_swap_pte(ptent)) {
-                mss->swap += ptent_size;
-                return;
+        if (pte_present(ptent)) {
+                page = vm_normal_page(vma, addr, ptent);
+        } else if (is_swap_pte(ptent)) {
+                swp_entry_t swpent = pte_to_swp_entry(ptent);
+
+                if (!non_swap_entry(swpent))
+                        mss->swap += ptent_size;
+                else if (is_migration_entry(swpent))
+                        page = migration_entry_to_page(swpent);
+        } else if (pte_file(ptent)) {
+                if (pte_to_pgoff(ptent) != pgoff)
+                        mss->nonlinear += ptent_size;
         }
 
-        if (!pte_present(ptent))
-                return;
-
-        page = vm_normal_page(vma, addr, ptent);
         if (!page)
                 return;
 
         if (PageAnon(page))
                 mss->anonymous += ptent_size;
 
+        if (page->index != pgoff)
+                mss->nonlinear += ptent_size;
+
         mss->resident += ptent_size;
         /* Accumulate the size in pages that have been accessed. */
         if (pte_young(ptent) || PageReferenced(page))
@@ -521,6 +531,10 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                    (vma->vm_flags & VM_LOCKED) ?
                         (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
+        if (vma->vm_flags & VM_NONLINEAR)
+                seq_printf(m, "Nonlinear:      %8lu kB\n",
+                                mss.nonlinear >> 10);
+
         if (m->count < m->size)  /* vma is copied successfully */
                 m->version = (vma != get_gate_vma(task->mm))
                         ? vma->vm_start : 0;
@@ -700,6 +714,7 @@ struct pagemapread {
 
 #define PM_PRESENT          PM_STATUS(4LL)
 #define PM_SWAP             PM_STATUS(2LL)
+#define PM_FILE             PM_STATUS(1LL)
 #define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
 #define PM_END_OF_BUFFER    1
 
@@ -733,22 +748,33 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
         return err;
 }
 
-static u64 swap_pte_to_pagemap_entry(pte_t pte)
-{
-        swp_entry_t e = pte_to_swp_entry(pte);
-        return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
-}
-
-static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
+static void pte_to_pagemap_entry(pagemap_entry_t *pme,
+                struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
-        if (is_swap_pte(pte))
-                *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
-                                | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
-        else if (pte_present(pte))
-                *pme = make_pme(PM_PFRAME(pte_pfn(pte))
-                                | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
-        else
+        u64 frame, flags;
+        struct page *page = NULL;
+
+        if (pte_present(pte)) {
+                frame = pte_pfn(pte);
+                flags = PM_PRESENT;
+                page = vm_normal_page(vma, addr, pte);
+        } else if (is_swap_pte(pte)) {
+                swp_entry_t entry = pte_to_swp_entry(pte);
+
+                frame = swp_type(entry) |
+                        (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+                flags = PM_SWAP;
+                if (is_migration_entry(entry))
+                        page = migration_entry_to_page(entry);
+        } else {
                 *pme = make_pme(PM_NOT_PRESENT);
+                return;
+        }
+
+        if (page && !PageAnon(page))
+                flags |= PM_FILE;
+
+        *pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -784,7 +810,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 
         /* find the first VMA at or above 'addr' */
         vma = find_vma(walk->mm, addr);
-        if (pmd_trans_huge_lock(pmd, vma) == 1) {
+        if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
                 for (; addr != end; addr += PAGE_SIZE) {
                         unsigned long offset;
 
@@ -815,7 +841,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                 if (vma && (vma->vm_start <= addr) &&
                     !is_vm_hugetlb_page(vma)) {
                         pte = pte_offset_map(pmd, addr);
-                        pte_to_pagemap_entry(&pme, *pte);
+                        pte_to_pagemap_entry(&pme, vma, addr, *pte);
                         /* unmap before userspace copy */
                         pte_unmap(pte);
                 }
@@ -869,11 +895,11 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
  * For each page in the address space, this file contains one 64-bit entry
  * consisting of the following:
  *
- * Bits 0-55  page frame number (PFN) if present
+ * Bits 0-54  page frame number (PFN) if present
  * Bits 0-4   swap type if swapped
- * Bits 5-55  swap offset if swapped
+ * Bits 5-54  swap offset if swapped
  * Bits 55-60 page shift (page size = 1<<page shift)
- * Bit  61    reserved for future use
+ * Bit  61    page is file-page or shared-anon
  * Bit  62    page swapped
 * Bit  63    page present
 *
@@ -919,7 +945,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
         if (!pm.buffer)
                 goto out_task;
 
-        mm = mm_for_maps(task);
+        mm = mm_access(task, PTRACE_MODE_READ);
         ret = PTR_ERR(mm);
         if (!mm || IS_ERR(mm))
                 goto out_free;
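For reference, the pagemap side of this diff can be exercised from userspace. The sketch below is not part of the patch: it is a minimal program, under the bit layout documented in the comment above (bits 0-54 PFN, bit 61 file-page/shared-anon, bit 62 swapped, bit 63 present), that looks up one virtual address of the current process in /proc/self/pagemap and reports the new bit 61. The PM_*_BIT macro names are illustrative, not the kernel's.

/*
 * Minimal userspace sketch (assumption: the pagemap entry layout quoted in
 * the patch above; macro names below are illustrative, not kernel macros).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PM_PFRAME_MASK  ((1ULL << 55) - 1)  /* bits 0-54: PFN, or swap type/offset */
#define PM_FILE_BIT     (1ULL << 61)        /* file-page or shared-anon */
#define PM_SWAP_BIT     (1ULL << 62)        /* page swapped */
#define PM_PRESENT_BIT  (1ULL << 63)        /* page present */

int main(void)
{
        int probe = 0;                      /* decode the page holding this variable */
        unsigned long vaddr = (unsigned long)&probe;
        long psize = sysconf(_SC_PAGESIZE);
        uint64_t entry;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* one 64-bit entry per virtual page, indexed by vaddr / page size */
        if (pread(fd, &entry, sizeof(entry),
                  (off_t)(vaddr / psize) * sizeof(entry)) != sizeof(entry)) {
                perror("pread");
                close(fd);
                return 1;
        }
        close(fd);

        printf("0x%lx: present=%d swapped=%d file/shared-anon=%d pfn=0x%llx\n",
               vaddr,
               !!(entry & PM_PRESENT_BIT),
               !!(entry & PM_SWAP_BIT),
               !!(entry & PM_FILE_BIT),
               (unsigned long long)(entry & PM_PFRAME_MASK));
        return 0;
}

Run against a private anonymous page (as here) the file/shared-anon bit should read 0; pointing the same lookup at a mmap'ed file-backed page should report 1. The smaps side of the diff needs no code: a VMA set up with remap_file_pages() (VM_NONLINEAR) gains a "Nonlinear:" line in /proc/PID/smaps showing, in kB, how much of it is mapped away from its linear file offset.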