author     Christophe Leroy <christophe.leroy@csgroup.eu>    2020-11-24 18:24:55 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>             2020-12-09 15:47:45 +0300
commit     bccc58986a2f98e3af349c85c5f49aac7fb19ef2 (patch)
tree       10f5e46c8f0d3298cb3fddd58300439b4f0135f3
parent     613df979da6c032cbe6fa273fb8ca21af022157e (diff)
download   linux-bccc58986a2f98e3af349c85c5f49aac7fb19ef2.tar.xz
powerpc/8xx: Always pin kernel text TLB
There is no big point in not pinning kernel text anymore, as now we can
keep pinned TLB entries even with things like DEBUG_PAGEALLOC.

Remove CONFIG_PIN_TLB_TEXT, making kernel text pinning unconditional.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[mpe: Drop ifdef around mmu_pin_tlb() to fix build errors]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/203b89de491e1379f1677a2685211b7c32adfff0.1606231483.git.christophe.leroy@csgroup.eu
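For orientation, what the patch removes on the C side is a compile-time guard on a
bool Kconfig symbol. A minimal sketch of that pattern (not kernel code; the
mmu_pin_tlb() call and its arguments come from the arch/powerpc/mm/nohash/8xx.c
hunk below, the rest is illustrative):

#include <linux/kconfig.h>
#include <linux/types.h>

void mmu_pin_tlb(unsigned long top, bool readonly);     /* implemented in head_8xx.S */

/* Before this patch: IS_ENABLED() is 1 when CONFIG_PIN_TLB_TEXT=y and 0
 * otherwise, so the compiler drops the call entirely when the option is off.
 * After this patch the guard is gone and mmu_pin_tlb() is always called. */
static void pin_text_sketch(unsigned long block_mapped_ram)
{
	if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
		mmu_pin_tlb(block_mapped_ram, false);
}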
-rw-r--r--  arch/powerpc/Kconfig                |  3
-rw-r--r--  arch/powerpc/kernel/head_8xx.S      | 22
-rw-r--r--  arch/powerpc/mm/nohash/8xx.c        |  3
-rw-r--r--  arch/powerpc/platforms/8xx/Kconfig  |  7
4 files changed, 5 insertions, 30 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d0d16c3e1b4b..2b8e47d1aa91 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -808,8 +808,7 @@ config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
- depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && \
- (!PIN_TLB_TEXT || !STRICT_KERNEL_RWX))
+ depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
help
This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 66ee62f30d36..e5d57018e1a0 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -43,15 +43,6 @@
.endm
/*
- * We need an ITLB miss handler for kernel addresses if:
- * - Either we have modules
- * - Or we have not pinned the first 8M
- */
-#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT)
-#define ITLB_MISS_KERNEL 1
-#endif
-
-/*
* Value for the bits that have fixed value in RPN entries.
* Also used for tagging DAR for DTLBerror.
*/
@@ -209,12 +200,12 @@ InstructionTLBMiss:
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
INVALIDATE_ADJACENT_PAGES_CPU15(r10)
mtspr SPRN_MD_EPN, r10
-#ifdef ITLB_MISS_KERNEL
+#ifdef CONFIG_MODULES
mfcr r11
compare_to_kernel_boundary r10, r10
#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table */
-#ifdef ITLB_MISS_KERNEL
+#ifdef CONFIG_MODULES
blt+ 3f
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
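With kernel text always covered by pinned 8M entries, the only kernel-side
instruction TLB misses left to service come from module code, which is why the
ITLB_MISS_KERNEL guards above collapse to plain CONFIG_MODULES. A very loose C
rendering of that decision, for orientation only (the PAGE_OFFSET value and the
function name are illustrative assumptions; the real check is
compare_to_kernel_boundary in the assembly above):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_OFFSET	0xc0000000UL	/* typical 32-bit powerpc value; assumption */

/* Sketch of the choice InstructionTLBMiss makes after this patch: kernel text
 * itself never faults (pinned 8M entries cover it), so swapper_pg_dir only
 * needs to be walked for module code, and only when modules are built in. */
static bool use_kernel_pgdir(uintptr_t ea, bool modules_enabled)
{
	if (!modules_enabled)
		return false;		/* remaining misses are user addresses */
	return ea >= PAGE_OFFSET;	/* module space: walk swapper_pg_dir */
}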
@@ -618,10 +609,6 @@ start_here:
lis r0, (MD_TWAM | MD_RSV4I)@h
mtspr SPRN_MD_CTR, r0
#endif
-#ifndef CONFIG_PIN_TLB_TEXT
- li r0, 0
- mtspr SPRN_MI_CTR, r0
-#endif
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
lis r0, MD_TWAM@h
mtspr SPRN_MD_CTR, r0
@@ -717,7 +704,6 @@ initial_mmu:
mtspr SPRN_DER, r8
blr
-#ifdef CONFIG_PIN_TLB
_GLOBAL(mmu_pin_tlb)
lis r9, (1f - PAGE_OFFSET)@h
ori r9, r9, (1f - PAGE_OFFSET)@l
@@ -739,7 +725,6 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_MD_CTR, r6
tlbia
-#ifdef CONFIG_PIN_TLB_TEXT
LOAD_REG_IMMEDIATE(r5, 28 << 8)
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
@@ -760,7 +745,7 @@ _GLOBAL(mmu_pin_tlb)
bdnzt lt, 2b
lis r0, MI_RSV4I@h
mtspr SPRN_MI_CTR, r0
-#endif
+
LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
#ifdef CONFIG_PIN_TLB_DATA
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
@@ -818,7 +803,6 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
rfi
-#endif /* CONFIG_PIN_TLB */
/*
* We put a few things here that have to be page-aligned.
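As a rough C-flavoured note on the now-unconditional mmu_pin_tlb above:
"pinning" means loading dedicated ITLB entries with 8M pages covering kernel
text at PAGE_OFFSET, then setting MI_RSV4I in SPRN_MI_CTR so those entries are
excluded from hardware replacement. A sketch of just the reservation step,
assuming the kernel's mtspr() accessor and the 8xx MMU header (header path is
an assumption):

#include <asm/reg.h>                    /* mtspr(), SPRN_MI_CTR */
#include <asm/nohash/32/mmu-8xx.h>      /* MI_RSV4I; header path is an assumption */

/* Sketch only: the real sequence is the assembly in mmu_pin_tlb above, which
 * first loads the pinned 8M text entries and then marks them reserved. */
static void reserve_pinned_itlb_entries_sketch(void)
{
	mtspr(SPRN_MI_CTR, MI_RSV4I);	/* pinned entries are never replacement victims */
}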
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 231ca95f9ffb..19a3eec1d8c5 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -186,8 +186,7 @@ void mmu_mark_initmem_nx(void)
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
- if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_pin_tlb(block_mapped_ram, false);
+ mmu_pin_tlb(block_mapped_ram, false);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index abb2b45b2789..60cc5b537a98 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -194,13 +194,6 @@ config PIN_TLB_IMMR
CONFIG_PIN_TLB_DATA is also selected, it will reduce
CONFIG_PIN_TLB_DATA to 24 Mbytes.
-config PIN_TLB_TEXT
- bool "Pinned TLB for TEXT"
- depends on PIN_TLB
- default y
- help
- This pins kernel text with 8M pages.
-
endmenu
endmenu