path: root/arch/powerpc/mm/pgtable.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with freeing of page
 * tables, along with common page table handling code.
 *
 *  Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC64
#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
#else
#define PGD_ALIGN PAGE_SIZE
#endif

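/* The kernel's initial top-level page table, in .bss, aligned as the MMU requires. */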
pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);

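/* True if the fault being handled is an instruction (exec) fault, i.e. trap 0x400. */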
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * avoid _PAGE_SPECIAL and cache-inhibited PTEs, and only do this for
 * userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte, unsigned long addr)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (!is_kernel_addr(addr))
			return 1;
	}
	return 0;
}

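/*
 * Return the folio backing a PTE, or NULL if the pfn is invalid or the
 * page is reserved.
 */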
static struct folio *maybe_pte_to_folio(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page_folio(page);
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, we always flush
 * the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
					     cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct folio *folio = maybe_pte_to_folio(pte);
		if (!folio)
			return pte;
		if (!test_bit(PG_dcache_clean, &folio->flags)) {
			flush_dcache_icache_folio(folio);
			set_bit(PG_dcache_clean, &folio->flags);
		}
	}
	return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

/* Embedded-type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 *
 * This is also called once per folio, so only work with folio->flags here.
 */
static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
	struct folio *folio;

	if (radix_enabled())
		return pte;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return set_pte_filter_hash(pte, addr);

	/* No exec permission in the first place, move on */
	if (!pte_exec(pte) || !pte_looks_normal(pte, addr))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	folio = maybe_pte_to_folio(pte);
	if (unlikely(!folio))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_dcache_clean, &folio->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_folio(folio);
		set_bit(PG_dcache_clean, &folio->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return pte_exprotect(pte);
}

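/*
 * Filter the PTE value used when updating access flags: on an exec fault we
 * may need to restore a filtered-out _PAGE_EXEC, flushing the i/d caches
 * first if the folio is not yet marked clean.
 */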
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct folio *folio;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		return pte;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return pte;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault()
	 * but let's make sure of it
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	folio = maybe_pte_to_folio(pte);
	if (unlikely(!folio))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_dcache_clean, &folio->flags))
		goto bail;

	/* Clean the page and set PG_dcache_clean */
	flush_dcache_icache_folio(folio);
	set_bit(PG_dcache_clean, &folio->flags);

 bail:
	return pte_mkexec(pte);
}

/*
 * set_ptes stores a range of consecutive linux PTEs into the linux page table.
 */
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr)
{
	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called. Filter the pte value and use the filtered value
	 * to setup all the ptes in the range.
	 */
	pte = set_pte_filter(pte, addr);

	/*
	 * We don't need to call arch_enter/leave_lazy_mmu_mode()
	 * because we expect set_ptes to only be used on PTEs that are not
	 * present and not hw_valid. Hence there is no translation cache
	 * flush involved that needs to be batched.
	 */
	for (;;) {
		/*
		 * Make sure hardware valid bit is not set. We don't do
		 * tlb flush for this update.
		 */
		VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

		/* Perform the setting of the PTE */
		__set_pte_at(mm, addr, ptep, pte, 0);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
		pte = pte_next_pfn(pte);
	}
}

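/* Clear the kernel PTE mapping @va and flush the corresponding TLB entry. */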
void unmap_kernel_page(unsigned long va)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep = pte_offset_kernel(pmdp, va);

	pte_clear(&init_mm, va, ptep);
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;
	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
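/*
 * Hugepage counterpart of ptep_set_access_flags(): update the access flags
 * of a huge PTE, using the hugepage's psize where the platform needs it.
 */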
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {
#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
		/*
		 * psize is not otherwise used on non-book3s64 platforms;
		 * 8xx compares it with mmu_virtual_psize to know whether
		 * it is a huge page or not.
		 */
		psize = MMU_PAGE_COUNT;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}

#if defined(CONFIG_PPC_8xx)
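/*
 * On 8xx a huge mapping is represented by several consecutive 4k PTE cells
 * in the page table; fill each cell, advancing the encoded value by SZ_4K.
 */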
static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val)
{
	pte_basic_t *entry = (pte_basic_t *)ptep;
	int num, i;

	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	num = number_of_cells_per_pte(pmd, val, 1);

	for (i = 0; i < num; i++, entry++, val += SZ_4K)
		*entry = val;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		     pte_t pte, unsigned long sz)
{
	pmd_t *pmdp = pmd_off(mm, addr);

	pte = set_pte_filter(pte, addr);

	if (sz == SZ_8M) { /* Flag both PMD entries as 8M and fill both page tables */
		*pmdp = __pmd(pmd_val(*pmdp) | _PMD_PAGE_8M);
		*(pmdp + 1) = __pmd(pmd_val(*(pmdp + 1)) | _PMD_PAGE_8M);

		__set_huge_pte_at(pmdp, pte_offset_kernel(pmdp, 0), pte_val(pte));
		__set_huge_pte_at(pmdp, pte_offset_kernel(pmdp + 1, 0), pte_val(pte) + SZ_4M);
	} else {
		__set_huge_pte_at(pmdp, ptep, pte_val(pte));
	}
}
#else
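/*
 * Generic hugepage setter: pick the page-directory entry size covering @sz
 * and write one leaf PTE per covered entry, stepping the pfn accordingly.
 */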
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		     pte_t pte, unsigned long sz)
{
	unsigned long pdsize;
	int i;

	pte = set_pte_filter(pte, addr);

	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	if (sz < PMD_SIZE)
		pdsize = PAGE_SIZE;
	else if (sz < PUD_SIZE)
		pdsize = PMD_SIZE;
	else if (sz < P4D_SIZE)
		pdsize = PUD_SIZE;
	else if (sz < PGDIR_SIZE)
		pdsize = P4D_SIZE;
	else
		pdsize = PGDIR_SIZE;

	for (i = 0; i < sz / pdsize; i++, ptep++, addr += pdsize) {
		__set_pte_at(mm, addr, ptep, pte, 0);
		pte = __pte(pte_val(pte) + ((unsigned long long)pdsize / PAGE_SIZE << PFN_PTE_SHIFT));
	}
}
#endif
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
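/* Debug check: the page table lock covering the PTE that maps @addr must be held. */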
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, addr);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * To collapse normal pages into a hugepage, khugepaged first sets
	 * the pmd to none to force page fault/gup to take mmap_lock. After
	 * the pmd is set to none, we do a pte_clear which runs this
	 * assertion, so if we find the pmd none, just return.
	 */
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
	BUG_ON(!pte);
	assert_spin_locked(ptl);
	pte_unmap(pte);
}
#endif /* CONFIG_DEBUG_VM */

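/* Return the physical address backing a vmalloc virtual address. */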
unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);

/*
 * We have 3 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but paca->irq_soft_mask = IRQS_ENABLED.
 */
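/*
 * Illustrative call pattern only (variable names are hypothetical): the walk
 * must run with interrupts disabled and the returned PTE re-checked after
 * loading it, as described above.
 *
 *	local_irq_save(flags);
 *	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &hshift);
 *	if (ptep)
 *		pte = READ_ONCE(*ptep);
 *	local_irq_restore(flags);
 */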
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t *pgdp;
#ifdef CONFIG_PPC64
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
#endif
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	unsigned pdshift;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or page unmap. The returned pte_t * is still not
	 * stable, so the caller should re-check the above conditions.
	 * Top level is an exception because it is folded into p4d.
	 *
	 * On PPC32, P4D/PUD/PMD are folded into PGD so go straight to
	 * PMD level.
	 */
	pgdp = pgdir + pgd_index(ea);
#ifdef CONFIG_PPC64
	p4dp = p4d_offset(pgdp, ea);
	p4d  = READ_ONCE(*p4dp);
	pdshift = P4D_SHIFT;

	if (p4d_none(p4d))
		return NULL;

	if (p4d_leaf(p4d)) {
		ret_pte = (pte_t *)p4dp;
		goto out;
	}

	/*
	 * Even if we end up with an unmap, the pgtable will not
	 * be freed, because we do an RCU free and interrupts are
	 * disabled here.
	 */
	pdshift = PUD_SHIFT;
	pudp = pud_offset(&p4d, ea);
	pud  = READ_ONCE(*pudp);

	if (pud_none(pud))
		return NULL;

	if (pud_leaf(pud)) {
		ret_pte = (pte_t *)pudp;
		goto out;
	}

	pmdp = pmd_offset(&pud, ea);
#else
	pmdp = pmd_offset(pud_offset(p4d_offset(pgdp, ea), ea), ea);
#endif
	pdshift = PMD_SHIFT;
	pmd  = READ_ONCE(*pmdp);

	/*
	 * A hugepage collapse is captured by this condition, see
	 * pmdp_collapse_flush.
	 */
	if (pmd_none(pmd))
		return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * A hugepage split is captured by this condition, see
	 * pmdp_invalidate.
	 *
	 * Huge page modification can be caught here too.
	 */
	if (pmd_is_serializing(pmd))
		return NULL;
#endif

	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
		if (is_thp)
			*is_thp = true;
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (pmd_leaf(pmd)) {
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	return pte_offset_kernel(&pmd, ea);

out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);

/* Note due to the way vm flags are laid out, the bits are XWR */
const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_EXECONLY_X,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXECONLY_X,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};

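/*
 * Generate the generic vm_get_page_prot() from protection_map[] above;
 * book3s64 provides its own implementation.
 */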
#ifndef CONFIG_PPC_BOOK3S_64
DECLARE_VM_GET_PAGE_PROT
#endif