#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
/*
 * For radix we want generic code to handle hugetlb. But if we want
 * both hash and radix to be enabled together, we need to work around
 * the limitations.
 */
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long pgoff,
				 unsigned long flags);
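
/*
 * Translate the huge page shift of an hstate into the matching
 * MMU_PAGE_* page size index. An unexpected shift triggers a warning
 * and falls back to mmu_virtual_psize.
 */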
static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift;

	shift = huge_page_shift(hstate);
	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
		return MMU_PAGE_16M;
	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
		return MMU_PAGE_16G;
	else {
		WARN(1, "Wrong huge page shift\n");
		return mmu_virtual_psize;
	}
}
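
/*
 * POWER9 DD1 workaround: on affected CPUs, 2M radix huge page PTEs
 * additionally carry the R_PAGE_LARGE bit. On other CPUs, and for
 * other page sizes, the entry is returned unmodified. 1G hugetlb
 * pages are not supported here.
 */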
#define arch_make_huge_pte arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	unsigned long page_shift;

	if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
		return entry;

	page_shift = huge_page_shift(hstate_vma(vma));
	/*
	 * We don't support 1G hugetlb pages yet.
	 */
	VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
	if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return __pte(pte_val(entry) | R_PAGE_LARGE);
	else
		return entry;
}
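
/*
 * Runtime gigantic page support: report it only when the radix MMU
 * is in use; hash does not support gigantic pages via this path.
 */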
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
	if (radix_enabled())
		return true;
	return false;
}
#endif
#endif /* _ASM_POWERPC_BOOK3S_64_HUGETLB_H */