author    Vasily Gorbik <gor@linux.ibm.com>  2017-11-17 19:59:53 +0300
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2018-10-09 12:21:30 +0300
commit    e006222b57508d58b55d36c6ae6663f5729dad2b (patch)
tree      c1f3f2c83a80ea41fa9cb40ac77bb53ca07961cb /arch/s390/mm
parent    5dff03813f46f267bc1ecb334901e916346692ff (diff)
download  linux-e006222b57508d58b55d36c6ae6663f5729dad2b.tar.xz
s390/mm: optimize debugfs ptdump kasan zero page walking
Kasan zero p4d/pud/pmd/pte tables are always filled with the corresponding
kasan zero entries. Walking an area backed by the kasan zero page is time
consuming and unnecessary: when a kasan zero p4d/pud/pmd is encountered,
everything below it eventually points to the kasan zero page, always with
the same attributes and nothing else, so the zero p4d/pud/pmd can be
jumped over.

Also add a space between the address range and the number of pages to
separate them from each other when the number of pages is huge:

0x0018000000000000-0x0018000010000000        256M PMD RW X
0x0018000010000000-0x001bfffff0000000 1073741312M PTE RO X
0x001bfffff0000000-0x001bfffff0001000          4K PTE RW X

Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
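To make the shortcut concrete, the following self-contained userspace C
sketch models the idea; it is not the kernel walker itself, and the names
(zero_pmd, note_page, walk_pud) and table sizes are invented for the
example (real tables have far more entries). When a "pud" entry points at
the one shared zero pmd table, the whole range is reported in a single
note_page() call instead of being walked entry by entry:

#include <stdio.h>

/* Toy two-level table: "pud" entries point either at a private pmd
 * table or at one shared, read-only zero pmd table (kasan-style). */
#define PTRS_PER_PUD 4
#define PTRS_PER_PMD 4
#define PMD_SIZE 0x100000UL
#define PUD_SIZE (PTRS_PER_PMD * PMD_SIZE)

struct pmd_entry { const char *prot; };

static struct pmd_entry zero_pmd[PTRS_PER_PMD]; /* plays kasan_zero_pmd */

static void note_page(unsigned long start, unsigned long end, const char *prot)
{
	printf("0x%016lx-0x%016lx %s\n", start, end, prot);
}

static void walk_pud(struct pmd_entry **pud, unsigned long addr)
{
	for (int i = 0; i < PTRS_PER_PUD; i++, addr += PUD_SIZE) {
		if (pud[i] == zero_pmd) {
			/* Whole pmd is zero entries: report the full
			 * range once instead of walking every entry. */
			note_page(addr, addr + PUD_SIZE, zero_pmd[0].prot);
			continue;
		}
		for (int j = 0; j < PTRS_PER_PMD; j++)
			note_page(addr + j * PMD_SIZE,
				  addr + (j + 1) * PMD_SIZE,
				  pud[i][j].prot);
	}
}

int main(void)
{
	static struct pmd_entry real[PTRS_PER_PMD];
	struct pmd_entry *pud[PTRS_PER_PUD] = { real, zero_pmd, zero_pmd, real };

	for (int j = 0; j < PTRS_PER_PMD; j++) {
		real[j].prot = "RW X";
		zero_pmd[j].prot = "RO X";
	}
	walk_pud(pud, 0x0018000000000000UL);
	return 0;
}

In the patch below the same test is done per level by comparing the entry
against __pa(kasan_zero_pmd/pud/p4d); in addition, the kernel's note_page()
merges adjacent ranges with identical attributes, which is how the single
huge 1073741312M line in the example output above comes about.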
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 35
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 5139c24111bc..15ee7dc48215 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -3,6 +3,7 @@
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/kasan.h>
#include <asm/kasan.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@@ -89,7 +90,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
} else if (prot != cur || level != st->level ||
st->current_address >= st->marker[1].start_address) {
/* Print the actual finished series */
- seq_printf(m, "0x%0*lx-0x%0*lx",
+ seq_printf(m, "0x%0*lx-0x%0*lx ",
width, st->start_address,
width, st->current_address);
delta = (st->current_address - st->start_address) >> 10;
@@ -109,6 +110,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}
}
+#ifdef CONFIG_KASAN
+static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
+{
+ unsigned int prot;
+
+ prot = pte_val(*kasan_zero_pte) &
+ (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
+ note_page(m, st, prot, 4);
+}
+#endif
+
/*
* The actual page table walker functions. In order to keep the
* implementation of print_prot() short, we only check and pass
@@ -141,6 +153,13 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd_t *pmd;
int i;
+#ifdef CONFIG_KASAN
+ if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
+ note_kasan_zero_page(m, st);
+ return;
+ }
+#endif
+
for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
st->current_address = addr;
pmd = pmd_offset(pud, addr);
@@ -165,6 +184,13 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud_t *pud;
int i;
+#ifdef CONFIG_KASAN
+ if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
+ note_kasan_zero_page(m, st);
+ return;
+ }
+#endif
+
for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
st->current_address = addr;
pud = pud_offset(p4d, addr);
@@ -188,6 +214,13 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
p4d_t *p4d;
int i;
+#ifdef CONFIG_KASAN
+ if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
+ note_kasan_zero_page(m, st);
+ return;
+ }
+#endif
+
for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
st->current_address = addr;
p4d = p4d_offset(pgd, addr);