// SPDX-License-Identifier: GPL-2.0
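/*
 * Dump of the kernel page tables for s390, built on top of the generic
 * ptdump walker (ptdump_walk_pgd()). Provides the debugfs file
 * "kernel_page_tables" and, with CONFIG_DEBUG_WX, a check for writable
 * and executable (W+X) mappings.
 */
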
#include <linux/set_memory.h>
#include <linux/ptdump.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/kasan.h>
#include <asm/ptdump.h>
#include <asm/kasan.h>
#include <asm/sections.h>

static unsigned long max_addr;

struct addr_marker {
        unsigned long start_address;
        const char *name;
};

enum address_markers_idx {
        IDENTITY_BEFORE_NR = 0,
        IDENTITY_BEFORE_END_NR,
        KERNEL_START_NR,
        KERNEL_END_NR,
        IDENTITY_AFTER_NR,
        IDENTITY_AFTER_END_NR,
#ifdef CONFIG_KASAN
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
#endif
        VMEMMAP_NR,
        VMEMMAP_END_NR,
        VMALLOC_NR,
        VMALLOC_END_NR,
        MODULES_NR,
        MODULES_END_NR,
};

static struct addr_marker address_markers[] = {
        [IDENTITY_BEFORE_NR]     = {0, "Identity Mapping Start"},
        [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
        [KERNEL_START_NR]        = {(unsigned long)_stext, "Kernel Image Start"},
        [KERNEL_END_NR]          = {(unsigned long)_end, "Kernel Image End"},
        [IDENTITY_AFTER_NR]      = {(unsigned long)_end, "Identity Mapping Start"},
        [IDENTITY_AFTER_END_NR]  = {0, "Identity Mapping End"},
#ifdef CONFIG_KASAN
        [KASAN_SHADOW_START_NR]  = {KASAN_SHADOW_START, "Kasan Shadow Start"},
        [KASAN_SHADOW_END_NR]    = {KASAN_SHADOW_END, "Kasan Shadow End"},
#endif
        [VMEMMAP_NR]             = {0, "vmemmap Area Start"},
        [VMEMMAP_END_NR]         = {0, "vmemmap Area End"},
        [VMALLOC_NR]             = {0, "vmalloc Area Start"},
        [VMALLOC_END_NR]         = {0, "vmalloc Area End"},
        [MODULES_NR]             = {0, "Modules Area Start"},
        [MODULES_END_NR]         = {0, "Modules Area End"},
        { -1, NULL }
};

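/*
 * Walker state carried between note_page() calls: the start address and
 * protection bits of the contiguous range currently being merged, the
 * paging level it was seen at, and the next address marker to emit.
 */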
struct pg_state {
        struct ptdump_state ptdump;
        struct seq_file *seq;
        int level;
        unsigned int current_prot;
        bool check_wx;
        unsigned long wx_pages;
        unsigned long start_address;
        const struct addr_marker *marker;
};

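/*
 * Print helpers that stay silent when no seq_file is attached, so the
 * same walk can also be used for the output-less W+X check.
 */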
#define pt_dump_seq_printf(m, fmt, args...)             \
({                                                      \
        struct seq_file *__m = (m);                     \
                                                        \
        if (__m)                                        \
                seq_printf(__m, fmt, ##args);           \
})

#define pt_dump_seq_puts(m, fmt)                        \
({                                                      \
        struct seq_file *__m = (m);                     \
                                                        \
        if (__m)                                        \
                seq_printf(__m, fmt);                   \
})

static void print_prot(struct seq_file *m, unsigned int pr, int level)
{
        static const char * const level_name[] =
                { "ASCE", "PGD", "PUD", "PMD", "PTE" };

        pt_dump_seq_printf(m, "%s ", level_name[level]);
        if (pr & _PAGE_INVALID) {
                pt_dump_seq_printf(m, "I\n");
                return;
        }
        pt_dump_seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
        pt_dump_seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}

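/*
 * Account (and warn once about) a range that is mapped valid, writable
 * and executable; the first lowcore page is a tolerated exception.
 */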
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
#ifdef CONFIG_DEBUG_WX
        if (!st->check_wx)
                return;
        if (st->current_prot & _PAGE_INVALID)
                return;
        if (st->current_prot & _PAGE_PROTECT)
                return;
        if (st->current_prot & _PAGE_NOEXEC)
                return;
        /* The first lowcore page is currently still W+X. */
        if (addr == PAGE_SIZE)
                return;
        WARN_ONCE(1, "s390/mm: Found insecure W+X mapping at address %pS\n",
                  (void *)st->start_address);
        st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
#endif /* CONFIG_DEBUG_WX */
}

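/*
 * Callback invoked by the generic ptdump code for every page table entry.
 * Contiguous ranges with identical protection and level are merged and
 * printed as a single line, interleaved with the address markers above.
 */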
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
{
        int width = sizeof(unsigned long) * 2;
        static const char units[] = "KMGTPE";
        const char *unit = units;
        unsigned long delta;
        struct pg_state *st;
        struct seq_file *m;
        unsigned int prot;

        st = container_of(pt_st, struct pg_state, ptdump);
        m = st->seq;
        prot = val & (_PAGE_PROTECT | _PAGE_NOEXEC);
        if (level == 4 && (val & _PAGE_INVALID))
                prot = _PAGE_INVALID;
        /* For pmd_none() & friends val gets passed as zero. */
        if (level != 4 && !val)
                prot = _PAGE_INVALID;
        /* Final flush from generic code. */
        if (level == -1)
                addr = max_addr;
        if (st->level == -1) {
                pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
                st->start_address = addr;
                st->current_prot = prot;
                st->level = level;
        } else if (prot != st->current_prot || level != st->level ||
                   addr >= st->marker[1].start_address) {
                note_prot_wx(st, addr);
                pt_dump_seq_printf(m, "0x%0*lx-0x%0*lx ",
                                   width, st->start_address,
                                   width, addr);
                delta = (addr - st->start_address) >> 10;
                while (!(delta & 0x3ff) && unit[1]) {
                        delta >>= 10;
                        unit++;
                }
                pt_dump_seq_printf(m, "%9lu%c ", delta, *unit);
                print_prot(m, st->current_prot, st->level);
                while (addr >= st->marker[1].start_address) {
                        st->marker++;
                        pt_dump_seq_printf(m, "---[ %s ]---\n", st->marker->name);
                }
                st->start_address = addr;
                st->current_prot = prot;
                st->level = level;
        }
}

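/*
 * Walk the kernel page tables without producing any output and report
 * unexpected W+X mappings, skipping the known lowcore exception.
 */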
#ifdef CONFIG_DEBUG_WX
void ptdump_check_wx(void)
{
        struct pg_state st = {
                .ptdump = {
                        .note_page = note_page,
                        .range = (struct ptdump_range[]) {
                                {.start = 0, .end = max_addr},
                                {.start = 0, .end = 0},
                        }
                },
                .seq = NULL,
                .level = -1,
                .current_prot = 0,
                .check_wx = true,
                .wx_pages = 0,
                .start_address = 0,
                .marker = (struct addr_marker[]) {
                        { .start_address =  0, .name = NULL},
                        { .start_address = -1, .name = NULL},
                },
        };

        if (!MACHINE_HAS_NX)
                return;
        ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
        if (st.wx_pages)
                pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n", st.wx_pages);
        else
                pr_info("Checked W+X mappings: passed, no unexpected W+X pages found\n");
}
#endif /* CONFIG_DEBUG_WX */

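/*
 * debugfs interface: the dump is done under cpa_mutex and the memory
 * hotplug lock so the kernel page tables do not change under the walker.
 */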
#ifdef CONFIG_PTDUMP_DEBUGFS
static int ptdump_show(struct seq_file *m, void *v)
{
        struct pg_state st = {
                .ptdump = {
                        .note_page = note_page,
                        .range = (struct ptdump_range[]) {
                                {.start = 0, .end = max_addr},
                                {.start = 0, .end = 0},
                        }
                },
                .seq = m,
                .level = -1,
                .current_prot = 0,
                .check_wx = false,
                .wx_pages = 0,
                .start_address = 0,
                .marker = address_markers,
        };

        get_online_mems();
        mutex_lock(&cpa_mutex);
        ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
        mutex_unlock(&cpa_mutex);
        put_online_mems();
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
#endif /* CONFIG_PTDUMP_DEBUGFS */

/*
 * Heapsort from lib/sort.c is not a stable sorting algorithm, do a simple
 * insertion sort to preserve the original order of markers with the same
 * start address.
 */
static void sort_address_markers(void)
{
        struct addr_marker tmp;
        int i, j;

        for (i = 1; i < ARRAY_SIZE(address_markers) - 1; i++) {
                tmp = address_markers[i];
                for (j = i - 1; j >= 0 && address_markers[j].start_address > tmp.start_address; j--)
                        address_markers[j + 1] = address_markers[j];
                address_markers[j + 1] = tmp;
        }
}

static int pt_dump_init(void)
{
        /*
         * Figure out the maximum virtual address being accessible with the
         * kernel ASCE. We need this to keep the page table walker functions
         * from accessing non-existent entries. The designation type encoded
         * in the ASCE tells how many region table levels are in use; each
         * level resolves 11 address bits on top of the 2^31 bytes covered
         * by a segment table, hence the "* 11 + 31" shift.
         */
        max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
        max_addr = 1UL << (max_addr * 11 + 31);
        address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
        address_markers[MODULES_NR].start_address = MODULES_VADDR;
        address_markers[MODULES_END_NR].start_address = MODULES_END;
        address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
        address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
        address_markers[VMALLOC_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
        sort_address_markers();
#ifdef CONFIG_PTDUMP_DEBUGFS
        debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
#endif /* CONFIG_PTDUMP_DEBUGFS */
        return 0;
}
device_initcall(pt_dump_init);