Diffstat (limited to 'arch/powerpc/mm'):

 -rw-r--r--  arch/powerpc/mm/book3s64/hash_hugepage.c | 15
 -rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c    |  3
 -rw-r--r--  arch/powerpc/mm/book3s64/radix_pgtable.c | 14
 -rw-r--r--  arch/powerpc/mm/drmem.c                  | 10
 -rw-r--r--  arch/powerpc/mm/mmu_decl.h               |  2
 -rw-r--r--  arch/powerpc/mm/numa.c                   |  6
 -rw-r--r--  arch/powerpc/mm/pageattr.c               | 28
 -rw-r--r--  arch/powerpc/mm/pgtable_32.c             | 15

 8 files changed, 47 insertions(+), 46 deletions(-)
diff --git a/arch/powerpc/mm/book3s64/hash_hugepage.c b/arch/powerpc/mm/book3s64/hash_hugepage.c
index c0fabe6c5a12..15d6f3ea7178 100644
--- a/arch/powerpc/mm/book3s64/hash_hugepage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugepage.c
@@ -59,16 +59,13 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 	rflags = htab_convert_pte_flags(new_pmd, flags);
 
-#if 0
-	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
-
-		/*
-		 * No CPU has hugepages but lacks no execute, so we
-		 * don't need to worry about that case
-		 */
-		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
-	}
-#endif
+	/*
+	 * THPs are only supported on platforms that can do mixed page size
+	 * segments (MPSS) and all such platforms have coherent icache. Hence we
+	 * don't need to do lazy icache flush (hash_page_do_lazy_icache()) on
+	 * noexecute fault.
+	 */
 
 	/*
 	 * Find the slot index details for this ea, using base page size.
 	 */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0626a25b0d72..01c3b4b65241 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2172,7 +2172,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 				     mmu_kernel_ssize, 0);
 }
 
-void hash__kernel_map_pages(struct page *page, int numpages, int enable)
+int hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;
@@ -2189,6 +2189,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 			kernel_unmap_linear_page(vaddr, lmi);
 	}
 	local_irq_restore(flags);
+	return 0;
 }
 
 #endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 5cc4008329be..15e88f1439ec 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1339,20 +1339,6 @@ void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
 #endif
 #endif
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-void radix__kernel_map_pages(struct page *page, int numpages, int enable)
-{
-	unsigned long addr;
-
-	addr = (unsigned long)page_address(page);
-
-	if (enable)
-		set_memory_p(addr, numpages);
-	else
-		set_memory_np(addr, numpages);
-}
-#endif
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index fde7790277f7..c110ab8fa8a3 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -393,17 +393,17 @@ static const __be32 *of_get_usable_memory(struct device_node *dn)
 int walk_drmem_lmbs(struct device_node *dn, void *data,
 		    int (*func)(struct drmem_lmb *, const __be32 **, void *))
 {
+	struct device_node *root = of_find_node_by_path("/");
 	const __be32 *prop, *usm;
 	int ret = -ENODEV;
 
-	if (!of_root)
+	if (!root)
 		return ret;
 
 	/* Get the address & size cells */
-	of_node_get(of_root);
-	n_root_addr_cells = of_n_addr_cells(of_root);
-	n_root_size_cells = of_n_size_cells(of_root);
-	of_node_put(of_root);
+	n_root_addr_cells = of_n_addr_cells(root);
+	n_root_size_cells = of_n_size_cells(root);
+	of_node_put(root);
 
 	if (init_drmem_lmb_size(dn))
 		return ret;
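
The hunk above drops the bare of_root global in favour of a counted reference. A minimal sketch of the pattern, outside the patch (the helper name is hypothetical; of_find_node_by_path(), of_n_addr_cells(), of_n_size_cells() and of_node_put() are the real API):

#include <linux/of.h>

/*
 * Hypothetical helper showing the pattern adopted above.
 * of_find_node_by_path() returns the node with its refcount already
 * raised, so the old of_node_get()/of_node_put() pair around of_root
 * collapses into a single of_node_put() once we are done.
 */
static int get_root_cells(int *addr_cells, int *size_cells)
{
	struct device_node *root = of_find_node_by_path("/");

	if (!root)
		return -ENODEV;

	*addr_cells = of_n_addr_cells(root);
	*size_cells = of_n_size_cells(root);
	of_node_put(root);

	return 0;
}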
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 90dcc2844056..8e84bc214d13 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -180,3 +180,5 @@ static inline bool debug_pagealloc_enabled_or_kfence(void)
 int create_section_mapping(unsigned long start, unsigned long end,
 			   int nid, pgprot_t prot);
 #endif
+
+int hash__kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f6c4ace3b221..a490724e84ad 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1111,7 +1111,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 
 static void __init find_possible_nodes(void)
 {
-	struct device_node *rtas;
+	struct device_node *rtas, *root;
 	const __be32 *domains = NULL;
 	int prop_length, max_nodes;
 	u32 i;
@@ -1132,10 +1132,12 @@ static void __init find_possible_nodes(void)
 	 * If the LPAR is migratable, new nodes might be activated after a LPM,
 	 * so we should consider the max number in that case.
 	 */
-	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
+	root = of_find_node_by_path("/");
+	if (!of_get_property(root, "ibm,migratable-partition", NULL))
 		domains = of_get_property(rtas,
 					  "ibm,current-associativity-domains",
 					  &prop_length);
+	of_node_put(root);
 	if (!domains) {
 		domains = of_get_property(rtas, "ibm,max-associativity-domains",
 					  &prop_length);
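
No explicit NULL check on root is needed above: of_get_property() returns NULL when handed a NULL node and of_node_put() is a no-op on NULL, so a missing root is simply treated as a non-migratable partition. A compact sketch of that NULL tolerance (helper name hypothetical, property name taken from the hunk):

#include <linux/of.h>

static bool partition_is_migratable(void)
{
	/* May legitimately be NULL on a malformed device tree. */
	struct device_node *root = of_find_node_by_path("/");
	/* of_get_property(NULL, ...) just returns NULL. */
	bool migratable = of_get_property(root, "ibm,migratable-partition",
					  NULL) != NULL;

	of_node_put(root);	/* no-op when root is NULL */
	return migratable;
}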
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 6163e484bc6d..ac22bf28086f 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -14,6 +14,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <mm/mmu_decl.h>
 
 static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
 				    unsigned long old, unsigned long new)
 {
@@ -38,6 +39,10 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 		/* Don't clear DIRTY bit */
 		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
 		break;
+	case SET_MEMORY_ROX:
+		/* Don't clear DIRTY bit */
+		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_ROX);
+		break;
 	case SET_MEMORY_RW:
 		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
 		break;
@@ -97,3 +102,26 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
 	return apply_to_existing_page_range(&init_mm, start, size,
 					    change_page_attr, (void *)action);
 }
+
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	int err;
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (PageHighMem(page))
+		return;
+
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
+		err = hash__kernel_map_pages(page, numpages, enable);
+	else if (enable)
+		err = set_memory_p(addr, numpages);
+	else
+		err = set_memory_np(addr, numpages);
+
+	if (err)
+		panic("%s: changing memory protections failed\n", __func__);
+}
+#endif
+#endif
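
pageattr.c thus gains both pieces: a SET_MEMORY_ROX action, letting read-only plus executable be applied in one PTE walk instead of chained set_memory_ro()/set_memory_x() calls, and a single error-checked __kernel_map_pages() replacing the radix and 32-bit copies deleted above (hash keeps its own helper, which works on the hash page table directly). Since __kernel_map_pages() has no way to report failure to the page allocator, a failed protection change ends in panic(). A hedged usage sketch of the new action (seal_jit_page() is hypothetical; set_memory_rox() is the generic wrapper expected to reach change_memory_attr() here):

#include <linux/set_memory.h>

/* Hypothetical caller: seal one page of generated code R-X in one pass. */
static int seal_jit_page(unsigned long addr)
{
	/* One PTE walk instead of set_memory_ro() + set_memory_x(). */
	return set_memory_rox(addr, 1);
}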
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 12498017da8e..face94977cb2 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -167,18 +167,3 @@ void mark_rodata_ro(void)
 	set_memory_ro((unsigned long)_stext, numpages);
 }
 #endif
-
-#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
-void __kernel_map_pages(struct page *page, int numpages, int enable)
-{
-	unsigned long addr = (unsigned long)page_address(page);
-
-	if (PageHighMem(page))
-		return;
-
-	if (enable)
-		set_memory_p(addr, numpages);
-	else
-		set_memory_np(addr, numpages);
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC */