// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation
#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
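
/*
 * Boot-time allocator for shadow page tables: returns zeroed memory from
 * memblock or panics, so callers never need to check for failure.
 */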
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size,
		__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
			__func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

extern pgd_t early_pg_dir[PTRS_PER_PGD];
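
/*
 * Early shadow setup: point every PTE of the shared early table at the
 * single zeroed kasan_early_shadow_page, every PMD at that PTE table, and
 * every PGD entry covering the shadow region at the shared PMD table, in
 * both early_pg_dir and swapper_pg_dir. KASAN checks are then safe to run
 * before real shadow memory exists.
 */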
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	/* Repeat for swapper_pg_dir, which takes over from early_pg_dir. */
	pgd = pgd_offset_k(KASAN_SHADOW_START);
	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}
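
/*
 * Install leaf PTEs for the shadow of [vaddr, end): allocate a PTE table
 * if the PMD entry is still empty, then back each missing shadow page
 * with freshly allocated physical memory.
 */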
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
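
/*
 * Populate the shadow at PMD granularity: use a huge PMD mapping when the
 * range is PMD-aligned and large enough, otherwise fall back to PTEs.
 */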
static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
	if (base_pmd == lm_alias(kasan_early_shadow_pmd))
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
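
/*
 * Top-level walk: try PGDIR-size huge mappings first, then hand each PGD
 * entry's range down to kasan_populate_pmd().
 */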
static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		/*
		 * pgdp can't be none since kasan_early_init() initialized the
		 * whole KASAN shadow region with kasan_early_shadow_pmd: if
		 * this is still the case, we can try to allocate a hugepage
		 * as a replacement.
		 */
		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pmd(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
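
/*
 * Map real shadow memory for [start, end), then initialize it to
 * KASAN_SHADOW_INIT (all accesses valid) once the mapping is live.
 */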
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}
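
/*
 * For KASAN_VMALLOC, only intermediate tables are pre-built for the
 * vmalloc shadow: leaf shadow pages are hooked in lazily as vmalloc
 * areas are created.
 */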
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);

		/*
		 * A PGD entry still pointing at the shared early shadow PMD
		 * gets its own zeroed page table, so the shared early tables
		 * are never modified behind other users' backs.
		 */
		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
}
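
/*
 * Main entry point: populate real shadow for the linear mapping of all
 * memory, handle the vmalloc shadow, and finally make the early shadow
 * page read-only for the regions that keep using it.
 */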
void __init kasan_init(void)
{
	phys_addr_t _start, _end;
	u64 i;

	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)VMEMMAP_END));

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping's shadow for every memblock region. */
	for_each_mem_range(i, &_start, &_end) {
		void *start = (void *)__va(_start);
		void *end = (void *)__va(_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/*
	 * Remap the early shadow page read-only: regions that still use it
	 * must never be written through the shadow.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);

	/* KASAN is now fully initialized: allow reports. */
	init_task.kasan_depth = 0;
}