/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*
* Derived from MIPS:
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
* Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
*/
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif
#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SHIFT (PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE (1UL << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
#define PGDIR_SHIFT (PUD_SHIFT + (PAGE_SHIFT - 3))
#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define VA_BITS (PGDIR_SHIFT + (PAGE_SHIFT - 3))
#define PTRS_PER_PGD (PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD (PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD (PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE (PAGE_SIZE >> 3)
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
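/*
 * Worked example, assuming the common 16KB page configuration
 * (PAGE_SHIFT == 14) with CONFIG_PGTABLE_LEVELS == 3: each table level
 * indexes PAGE_SHIFT - 3 == 11 bits, because one page holds
 * PAGE_SIZE / sizeof(u64) == 2048 entries.  That gives
 * PMD_SHIFT == 25 (PMD_SIZE == 32MB), PGDIR_SHIFT == 36
 * (PGDIR_SIZE == 64GB), VA_BITS == 47 and
 * PTRS_PER_PGD == PTRS_PER_PMD == PTRS_PER_PTE == 2048.  Other page
 * sizes and level counts scale the same way.
 */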
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>
struct mm_struct;
struct vm_area_struct;
/*
* ZERO_PAGE is a global shared page that is always zero; used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
/*
* TLB refill handlers may also map the vmalloc area into xkvrange.
* Avoid the first couple of pages so NULL pointer dereferences will
* still reliably trap.
*/
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M)
#ifdef CONFIG_KFENCE
#define KFENCE_AREA_SIZE (((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
#else
#define KFENCE_AREA_SIZE 0
#endif
#define VMALLOC_START MODULES_END
#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif
#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
#define KFENCE_AREA_START (VMEMMAP_END + 1)
#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
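/*
 * Sketch of the resulting layout above vm_map_base (low to high): PCI
 * I/O space plus two skipped pages, a 256MB module region
 * [MODULES_VADDR, MODULES_END), the vmalloc area starting at
 * MODULES_END, then the PMD-aligned vmemmap array and, when KFENCE is
 * enabled, its pool at the top.  VMALLOC_END is capped so that vmemmap
 * and the KFENCE area still fit inside the usable virtual span
 * ((1UL << cpu_vabits), presumably halved under KASAN to leave room
 * for the shadow region).
 */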
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pte_t invalid_pte_table[PTRS_PER_PTE];
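/*
 * Empty entries at each level do not hold NULL but the address of a
 * dummy "invalid" table of the next level (invalid_pud_table,
 * invalid_pmd_table, invalid_pte_table), so a page-table walk such as
 * the TLB refill path can always descend one more level without a NULL
 * check and simply ends up at an invalid PTE.
 */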
#ifndef __PAGETABLE_PUD_FOLDED
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })
extern pud_t invalid_pud_table[PTRS_PER_PUD];
/*
* Empty pgd/p4d entries point to the invalid_pud_table.
*/
static inline int p4d_none(p4d_t p4d)
{
return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}
static inline int p4d_bad(p4d_t p4d)
{
return p4d_val(p4d) & ~PAGE_MASK;
}
static inline int p4d_present(p4d_t p4d)
{
return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}
static inline void p4d_clear(p4d_t *p4dp)
{
p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}
static inline pud_t *p4d_pgtable(p4d_t p4d)
{
return (pud_t *)p4d_val(p4d);
}
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
*p4d = p4dval;
}
#define p4d_phys(p4d) PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
#endif
#ifndef __PAGETABLE_PMD_FOLDED
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })
extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
/*
* Empty pud entries point to the invalid_pmd_table.
*/
static inline int pud_none(pud_t pud)
{
return pud_val(pud) == (unsigned long)invalid_pmd_table;
}
static inline int pud_bad(pud_t pud)
{
return pud_val(pud) & ~PAGE_MASK;
}
static inline int pud_present(pud_t pud)
{
return pud_val(pud) != (unsigned long)invalid_pmd_table;
}
static inline void pud_clear(pud_t *pudp)
{
pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}
static inline pmd_t *pud_pgtable(pud_t pud)
{
return (pmd_t *)pud_val(pud);
}
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#define pud_phys(pud) PHYSADDR(pud_val(pud))
#define pud_page(pud) (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
#endif
/*
* Empty pmd entries point to the invalid_pte_table.
*/
static inline int pmd_none(pmd_t pmd)
{
return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_val(pmd) & ~PAGE_MASK);
}
static inline int pmd_present(pmd_t pmd)
{
if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));
return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}
static inline void pmd_clear(pmd_t *pmdp)
{
pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
#define pmd_phys(pmd) PHYSADDR(pmd_val(pmd))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#define pmd_page_vaddr(pmd) pmd_val(pmd)
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_pfn(x) ((unsigned long)(((x).pte & _PFN_MASK) >> PFN_PTE_SHIFT))
#define pfn_pte(pfn, prot) __pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
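/*
 * A present PTE/PMD value is just the physical frame number placed at
 * PFN_PTE_SHIFT with the protection bits from the pgprot OR-ed in;
 * pfn_pte()/pfn_pmd() build such values and pte_pfn()/pte_page() take
 * them apart again.
 */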
/*
* Initialize a new pgd / pud / pmd table with invalid pointers.
*/
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
extern void pmd_init(void *addr);
/*
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
* are !pte_none() && !pte_present().
*
* Format of swap PTEs:
*
* 6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
* 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
* <--------------------------- offset ---------------------------
*
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* --------------> E <--- type ---> <---------- zeroes ---------->
*
* E is the exclusive marker that is not stored in swap entries.
* The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
*/
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }
#define __swp_type(x) (((x).val >> 16) & 0x7f)
#define __swp_offset(x) ((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x) ((pmd_t) { (x).val | _PAGE_HUGE })
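/*
 * Illustrative example (arbitrary values): __swp_entry(5, 0x1234) gives
 * (5 << 16) | (0x1234UL << 24) == 0x1234050000, from which __swp_type()
 * recovers 5 and __swp_offset() recovers 0x1234.  __swp_entry_to_pmd()
 * additionally sets _PAGE_HUGE so the non-present entry is still
 * treated as a leaf rather than as a pointer to a PTE table.
 */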
static inline int pte_swp_exclusive(pte_t pte)
{
return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
return pte;
}
static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
return pte;
}
extern void paging_init(void);
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC)
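/*
 * Note that pte_none() ignores _PAGE_GLOBAL: pte_clear() below may
 * leave _PAGE_GLOBAL set in an otherwise empty PTE to keep the global
 * status of a PTE pair consistent, so an entry counts as "none" as long
 * as no other bits are set.
 */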
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
*ptep = pteval;
if (pte_val(pteval) & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
/*
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
#ifdef CONFIG_SMP
/*
* For SMP, multiple CPUs can race, so we need to do
* this atomically.
*/
unsigned long page_global = _PAGE_GLOBAL;
unsigned long tmp;
__asm__ __volatile__ (
"1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n"
" or %[tmp], %[tmp], %[global] \n"
__SC "%[tmp], %[buddy] \n"
" beqz %[tmp], 1b \n"
" nop \n"
"2: \n"
__WEAK_LLSC_MB
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
}
}
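/*
 * The buddy handling above exists because a single TLB entry maps an
 * adjacent even/odd pair of pages and the hardware expects the global
 * bit to be consistent across the pair; under SMP the buddy is updated
 * with an LL/SC sequence so concurrent set_pte() calls cannot lose the
 * bit.  pte_clear() below preserves the same invariant when one half of
 * the pair is torn down.
 */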
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
/* Preserve global status for the pair */
if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
set_pte(ptep, __pte(_PAGE_GLOBAL));
else
set_pte(ptep, __pte(0));
}
#define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2 (__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & (_PAGE_DIRTY | _PAGE_MODIFIED); }
static inline pte_t pte_mkold(pte_t pte)
{
pte_val(pte) &= ~_PAGE_ACCESSED;
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_ACCESSED;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte_val(pte) |= _PAGE_MODIFIED;
if (pte_val(pte) & _PAGE_WRITE)
pte_val(pte) |= _PAGE_DIRTY;
return pte;
}
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
pte_val(pte) |= _PAGE_WRITE;
if (pte_val(pte) & _PAGE_MODIFIED)
pte_val(pte) |= _PAGE_DIRTY;
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
return pte;
}
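/*
 * Two dirty bits are kept: _PAGE_MODIFIED is the software dirty record,
 * while _PAGE_DIRTY corresponds to the hardware dirty bit that actually
 * permits stores.  pte_mkdirty() therefore always sets _PAGE_MODIFIED
 * but only sets _PAGE_DIRTY when the PTE is writable, and
 * pte_wrprotect() clears _PAGE_WRITE and _PAGE_DIRTY while leaving
 * _PAGE_MODIFIED intact, so the next store faults and dirtiness can be
 * re-established.
 */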
static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; }
static inline pte_t pte_mkhuge(pte_t pte)
{
pte_val(pte) |= _PAGE_HUGE;
return pte;
}
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte) { pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
if (pte_val(a) & _PAGE_PRESENT)
return true;
if ((pte_val(a) & _PAGE_PROTNONE) &&
atomic_read(&mm->tlb_flush_pending))
return true;
return false;
}
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
(pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
extern void __update_tlb(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
static inline void update_mmu_cache_range(struct vm_fault *vmf,
struct vm_area_struct *vma, unsigned long address,
pte_t *ptep, unsigned int nr)
{
for (;;) {
__update_tlb(vma, address, ptep);
if (--nr == 0)
break;
address += PAGE_SIZE;
ptep++;
}
}
#define update_mmu_cache(vma, addr, ptep) \
update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache
#define update_mmu_tlb_range(vma, addr, ptep, nr) \
update_mmu_cache_range(NULL, vma, addr, ptep, nr)
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
__update_tlb(vma, address, (pte_t *)pmdp);
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return (pmd_val(pmd) & _PFN_MASK) >> PFN_PTE_SHIFT;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish
static inline int pmd_trans_huge(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
pmd_val(pmd) |= _PAGE_HUGE;
return pmd;
}
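/*
 * In a huge (leaf) PMD the global bit lives at a different position
 * (_PAGE_HGLOBAL), so pmd_mkhuge() relocates an already-set
 * _PAGE_GLOBAL to that position before marking the entry huge.
 */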
#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_WRITE);
}
static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_WRITE;
if (pmd_val(pmd) & _PAGE_MODIFIED)
pmd_val(pmd) |= _PAGE_DIRTY;
return pmd;
}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
return pmd;
}
#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
return !!(pmd_val(pmd) & (_PAGE_DIRTY | _PAGE_MODIFIED));
}
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
return pmd;
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_MODIFIED;
if (pmd_val(pmd) & _PAGE_WRITE)
pmd_val(pmd) |= _PAGE_DIRTY;
return pmd;
}
#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
pmd_val(pmd) &= ~_PAGE_ACCESSED;
return pmd;
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_ACCESSED;
return pmd;
}
static inline struct page *pmd_page(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
return pfn_to_page(pmd_pfn(pmd));
return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
return pmd;
}
static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_PRESENT_INVALID;
pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);
return pmd;
}
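/*
 * pmd_mkinvalid() clears the bits that make the entry usable by the
 * hardware but sets _PAGE_PRESENT_INVALID, which pmd_present() above
 * still accepts for huge entries; this lets pmdp_invalidate() take a
 * huge PMD out of service temporarily without it appearing to be none
 * or a swap entry.
 */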
/*
* The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
* different prototype.
*/
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
pmd_t old = *pmdp;
pmd_clear(pmdp);
return old;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
return (pte_val(pte) & _PAGE_PROTNONE);
}
static inline long pmd_protnone(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */
#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
*/
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_PGTABLE_H */