From 9cfb541a4ad45168925078f7d1fe3a7363ba27e2 Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Tue, 3 Apr 2018 10:36:37 +0100 Subject: ARM: 8754/1: NOMMU: Move PMSAv7 MPU under its own namespace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are going to support a different MPU whose programming model is not compatible with PMSAv7, so move the PMSAv7 MPU under its own namespace. Tested-by: Szemző András Tested-by: Alexandre TORGUE Signed-off-by: Vladimir Murzin Signed-off-by: Russell King --- arch/arm/include/asm/mpu.h | 62 ++++++++++++++++++++++------------------------ arch/arm/include/asm/v7m.h | 6 ++--- 2 files changed, 32 insertions(+), 36 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h index 6d1491c8ee22..fbde275668c3 100644 --- a/arch/arm/include/asm/mpu.h +++ b/arch/arm/include/asm/mpu.h @@ -14,50 +14,50 @@ #define MMFR0_PMSAv7 (3 << 4) /* MPU D/I Size Register fields */ -#define MPU_RSR_SZ 1 -#define MPU_RSR_EN 0 -#define MPU_RSR_SD 8 +#define PMSAv7_RSR_SZ 1 +#define PMSAv7_RSR_EN 0 +#define PMSAv7_RSR_SD 8 /* Number of subregions (SD) */ -#define MPU_NR_SUBREGS 8 -#define MPU_MIN_SUBREG_SIZE 256 +#define PMSAv7_NR_SUBREGS 8 +#define PMSAv7_MIN_SUBREG_SIZE 256 /* The D/I RSR value for an enabled region spanning the whole of memory */ -#define MPU_RSR_ALL_MEM 63 +#define PMSAv7_RSR_ALL_MEM 63 /* Individual bits in the DR/IR ACR */ -#define MPU_ACR_XN (1 << 12) -#define MPU_ACR_SHARED (1 << 2) +#define PMSAv7_ACR_XN (1 << 12) +#define PMSAv7_ACR_SHARED (1 << 2) /* C, B and TEX[2:0] bits only have semantic meanings when grouped */ -#define MPU_RGN_CACHEABLE 0xB -#define MPU_RGN_SHARED_CACHEABLE (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) -#define MPU_RGN_STRONGLY_ORDERED 0 +#define PMSAv7_RGN_CACHEABLE 0xB +#define PMSAv7_RGN_SHARED_CACHEABLE (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED) +#define PMSAv7_RGN_STRONGLY_ORDERED 0 /* Main region should only be shared for SMP */ #ifdef CONFIG_SMP -#define MPU_RGN_NORMAL (MPU_RGN_CACHEABLE | MPU_ACR_SHARED) +#define PMSAv7_RGN_NORMAL (PMSAv7_RGN_CACHEABLE | PMSAv7_ACR_SHARED) #else -#define MPU_RGN_NORMAL MPU_RGN_CACHEABLE +#define PMSAv7_RGN_NORMAL PMSAv7_RGN_CACHEABLE #endif /* Access permission bits of ACR (only define those that we use)*/ -#define MPU_AP_PL1RO_PL0NA (0x5 << 8) -#define MPU_AP_PL1RW_PL0RW (0x3 << 8) -#define MPU_AP_PL1RW_PL0R0 (0x2 << 8) -#define MPU_AP_PL1RW_PL0NA (0x1 << 8) +#define PMSAv7_AP_PL1RO_PL0NA (0x5 << 8) +#define PMSAv7_AP_PL1RW_PL0RW (0x3 << 8) +#define PMSAv7_AP_PL1RW_PL0R0 (0x2 << 8) +#define PMSAv7_AP_PL1RW_PL0NA (0x1 << 8) /* For minimal static MPU region configurations */ -#define MPU_PROBE_REGION 0 -#define MPU_BG_REGION 1 -#define MPU_RAM_REGION 2 -#define MPU_ROM_REGION 3 +#define PMSAv7_PROBE_REGION 0 +#define PMSAv7_BG_REGION 1 +#define PMSAv7_RAM_REGION 2 +#define PMSAv7_ROM_REGION 3 /* Maximum number of regions Linux is interested in */ -#define MPU_MAX_REGIONS 16 +#define MPU_MAX_REGIONS 16 -#define MPU_DATA_SIDE 0 -#define MPU_INSTR_SIDE 1 +#define PMSAv7_DATA_SIDE 0 +#define PMSAv7_INSTR_SIDE 1 #ifndef __ASSEMBLY__ @@ -75,16 +75,12 @@ struct mpu_rgn_info { extern struct mpu_rgn_info mpu_rgn_info; #ifdef CONFIG_ARM_MPU - -extern void __init adjust_lowmem_bounds_mpu(void); -extern void __init mpu_setup(void); - +extern void __init pmsav7_adjust_lowmem_bounds(void); +extern void __init pmsav7_setup(void); #else - -static inline void adjust_lowmem_bounds_mpu(void) {} -static inline void
mpu_setup(void) {} - -#endif /* !CONFIG_ARM_MPU */ +static inline void pmsav7_adjust_lowmem_bounds(void) {}; +static inline void pmsav7_setup(void) {}; +#endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h index 634e77107425..aba49e0b3ebe 100644 --- a/arch/arm/include/asm/v7m.h +++ b/arch/arm/include/asm/v7m.h @@ -64,9 +64,9 @@ #define MPU_CTRL_ENABLE 1 #define MPU_CTRL_PRIVDEFENA (1 << 2) -#define MPU_RNR 0x98 -#define MPU_RBAR 0x9c -#define MPU_RASR 0xa0 +#define PMSAv7_RNR 0x98 +#define PMSAv7_RBAR 0x9c +#define PMSAv7_RASR 0xa0 /* Cache opeartions */ #define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */ -- cgit v1.2.3 From 046835b4aa22b9ab6aa0bb274e3b71047c4b887d Mon Sep 17 00:00:00 2001 From: Vladimir Murzin Date: Tue, 3 Apr 2018 10:39:23 +0100 Subject: ARM: 8757/1: NOMMU: Support PMSAv8 MPU The ARMv8R/M architecture defines a new memory protection scheme, PMSAv8, which is not compatible with PMSAv7. Key differences from PMSAv7 are: - Region geometry is defined by base and limit addresses - Addresses need to be either 32- or 64-byte aligned - No region priority, since overlapping regions are not allowed - It is unified, i.e. no distinction between data/instruction regions - Memory attributes are controlled via MAIR This patch implements support for the PMSAv8 MPU defined by the ARMv8R/M architecture. Signed-off-by: Vladimir Murzin Signed-off-by: Russell King --- arch/arm/include/asm/mpu.h | 52 ++++++- arch/arm/include/asm/v7m.h | 8 + arch/arm/kernel/asm-offsets.c | 2 + arch/arm/kernel/head-nommu.S | 163 ++++++++++++++++++++ arch/arm/kernel/vmlinux-xip.lds.S | 4 + arch/arm/kernel/vmlinux.lds.S | 7 + arch/arm/mm/Makefile | 2 +- arch/arm/mm/nommu.c | 6 + arch/arm/mm/pmsa-v8.c | 307 ++++++++++++++++++++++++++++++++++++++ 9 files changed, 547 insertions(+), 4 deletions(-) create mode 100644 arch/arm/mm/pmsa-v8.c (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h index fbde275668c3..5e088c83d3d8 100644 --- a/arch/arm/include/asm/mpu.h +++ b/arch/arm/include/asm/mpu.h @@ -12,6 +12,7 @@ /* ID_MMFR0 data relevant to MPU */ #define MMFR0_PMSA (0xF << 4) #define MMFR0_PMSAv7 (3 << 4) +#define MMFR0_PMSAv8 (4 << 4) /* MPU D/I Size Register fields */ #define PMSAv7_RSR_SZ 1 @@ -47,12 +48,43 @@ #define PMSAv7_AP_PL1RW_PL0R0 (0x2 << 8) #define PMSAv7_AP_PL1RW_PL0NA (0x1 << 8) +#define PMSAv8_BAR_XN 1 + +#define PMSAv8_LAR_EN 1 +#define PMSAv8_LAR_IDX(n) (((n) & 0x7) << 1) + + +#define PMSAv8_AP_PL1RW_PL0NA (0 << 1) +#define PMSAv8_AP_PL1RW_PL0RW (1 << 1) +#define PMSAv8_AP_PL1RO_PL0RO (3 << 1) + +#ifdef CONFIG_SMP +#define PMSAv8_RGN_SHARED (3 << 3) // inner sharable +#else +#define PMSAv8_RGN_SHARED (0 << 3) +#endif + +#define PMSAv8_RGN_DEVICE_nGnRnE 0 +#define PMSAv8_RGN_NORMAL 1 + +#define PMSAv8_MAIR(attr, mt) ((attr) << ((mt) * 8)) + +#ifdef CONFIG_CPU_V7M +#define PMSAv8_MINALIGN 32 +#else +#define PMSAv8_MINALIGN 64 +#endif + /* For minimal static MPU region configurations */ #define PMSAv7_PROBE_REGION 0 #define PMSAv7_BG_REGION 1 #define PMSAv7_RAM_REGION 2 #define PMSAv7_ROM_REGION 3 +/* Fixed for PMSAv8 only */ +#define PMSAv8_XIP_REGION 0 +#define PMSAv8_KERNEL_REGION 1 + /* Maximum number of regions Linux is interested in */ #define MPU_MAX_REGIONS 16 @@ -63,9 +95,18 @@ struct mpu_rgn { /* Assume same attributes for d/i-side */ - u32 drbar; - u32 drsr; - u32 dracr; + union { + u32 drbar; /* PMSAv7 */ + u32 prbar; /* PMSAv8 */ + }; + union { + u32 drsr; /* PMSAv7 */ + u32 prlar; /* PMSAv8 */ +
}; + union { + u32 dracr; /* PMSAv7 */ + u32 unused; /* not used in PMSAv8 */ + }; }; struct mpu_rgn_info { @@ -76,10 +117,15 @@ extern struct mpu_rgn_info mpu_rgn_info; #ifdef CONFIG_ARM_MPU extern void __init pmsav7_adjust_lowmem_bounds(void); +extern void __init pmsav8_adjust_lowmem_bounds(void); + extern void __init pmsav7_setup(void); +extern void __init pmsav8_setup(void); #else static inline void pmsav7_adjust_lowmem_bounds(void) {}; +static inline void pmsav8_adjust_lowmem_bounds(void) {}; static inline void pmsav7_setup(void) {}; +static inline void pmsav8_setup(void) {}; #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h index aba49e0b3ebe..187ccf6496ad 100644 --- a/arch/arm/include/asm/v7m.h +++ b/arch/arm/include/asm/v7m.h @@ -68,6 +68,14 @@ #define PMSAv7_RBAR 0x9c #define PMSAv7_RASR 0xa0 +#define PMSAv8_RNR 0x98 +#define PMSAv8_RBAR 0x9c +#define PMSAv8_RLAR 0xa0 +#define PMSAv8_RBAR_A(n) (PMSAv8_RBAR + 8*(n)) +#define PMSAv8_RLAR_A(n) (PMSAv8_RLAR + 8*(n)) +#define PMSAv8_MAIR0 0xc0 +#define PMSAv8_MAIR1 0xc4 + /* Cache opeartions */ #define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */ #define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 250a98544ca6..27c5381518d8 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -197,6 +197,8 @@ int main(void) DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar)); DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr)); DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr)); + DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar)); + DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar)); #endif return 0; } diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 2f0f1ba6e237..dd546d65a383 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -132,6 +132,25 @@ M_CLASS(ldr r3, [r12, 0x50]) AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0 and r3, r3, #(MMFR0_PMSA) @ PMSA field teq r3, #(MMFR0_PMSAv7) @ PMSA v7 + beq 1f + teq r3, #(MMFR0_PMSAv8) @ PMSA v8 + /* + * Memory region attributes for PMSAv8: + * + * n = AttrIndx[2:0] + * n MAIR + * DEVICE_nGnRnE 000 00000000 + * NORMAL 001 11111111 + */ + ldreq r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \ + PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL) +AR_CLASS(mcreq p15, 0, r3, c10, c2, 0) @ MAIR 0 +M_CLASS(streq r3, [r12, #PMSAv8_MAIR0]) + moveq r3, #0 +AR_CLASS(mcreq p15, 0, r3, c10, c2, 1) @ MAIR 1 +M_CLASS(streq r3, [r12, #PMSAv8_MAIR1]) + +1: #endif #ifdef CONFIG_CPU_CP15 /* @@ -235,6 +254,8 @@ M_CLASS(ldr r0, [r12, 0x50]) and r0, r0, #(MMFR0_PMSA) @ PMSA field teq r0, #(MMFR0_PMSAv7) @ PMSA v7 beq __setup_pmsa_v7 + teq r0, #(MMFR0_PMSAv8) @ PMSA v8 + beq __setup_pmsa_v8 ret lr ENDPROC(__setup_mpu) @@ -304,6 +325,119 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE]) ret lr ENDPROC(__setup_pmsa_v7) +ENTRY(__setup_pmsa_v8) + mov r0, #0 +AR_CLASS(mcr p15, 0, r0, c6, c2, 1) @ PRSEL +M_CLASS(str r0, [r12, #PMSAv8_RNR]) + isb + +#ifdef CONFIG_XIP_KERNEL + ldr r5, =CONFIG_XIP_PHYS_ADDR @ ROM start + ldr r6, =(_exiprom) @ ROM end + sub r6, r6, #1 + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN) + +AR_CLASS(mcr p15, 0, r5, c6, c8, 0) @ PRBAR0 +AR_CLASS(mcr p15, 0, r6, c6, c8, 1) @ PRLAR0 +M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(0)]) +M_CLASS(str r6, [r12, 
#PMSAv8_RLAR_A(0)]) +#endif + + ldr r5, =KERNEL_START + ldr r6, =KERNEL_END + sub r6, r6, #1 + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN) + +AR_CLASS(mcr p15, 0, r5, c6, c8, 4) @ PRBAR1 +AR_CLASS(mcr p15, 0, r6, c6, c8, 5) @ PRLAR1 +M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(1)]) +M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(1)]) + + /* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */ +#ifdef CONFIG_XIP_KERNEL + ldr r6, =KERNEL_START + ldr r5, =CONFIG_XIP_PHYS_ADDR + cmp r6, r5 + movcs r6, r5 +#else + ldr r6, =KERNEL_START +#endif + cmp r6, #0 + beq 1f + + mov r5, #0 + sub r6, r6, #1 + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN) + +AR_CLASS(mcr p15, 0, r5, c6, c9, 0) @ PRBAR2 +AR_CLASS(mcr p15, 0, r6, c6, c9, 1) @ PRLAR2 +M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(2)]) +M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(2)]) + +1: + /* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */ +#ifdef CONFIG_XIP_KERNEL + ldr r5, =KERNEL_END + ldr r6, =(_exiprom) + cmp r5, r6 + movcc r5, r6 +#else + ldr r5, =KERNEL_END +#endif + mov r6, #0xffffffff + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN) + +AR_CLASS(mcr p15, 0, r5, c6, c9, 4) @ PRBAR3 +AR_CLASS(mcr p15, 0, r6, c6, c9, 5) @ PRLAR3 +M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(3)]) +M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)]) + +#ifdef CONFIG_XIP_KERNEL + /* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */ + ldr r5, =(_exiprom) + ldr r6, =KERNEL_END + cmp r5, r6 + movcs r5, r6 + + ldr r6, =KERNEL_START + ldr r0, =CONFIG_XIP_PHYS_ADDR + cmp r6, r0 + movcc r6, r0 + + sub r6, r6, #1 + bic r6, r6, #(PMSAv8_MINALIGN - 1) + + orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN) + orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN) + +#ifdef CONFIG_CPU_V7M + /* There is no alias for n == 4 */ + mov r0, #4 + str r0, [r12, #PMSAv8_RNR] @ PRSEL + isb + + str r5, [r12, #PMSAv8_RBAR_A(0)] + str r6, [r12, #PMSAv8_RLAR_A(0)] +#else + mcr p15, 0, r5, c6, c10, 1 @ PRBAR4 + mcr p15, 0, r6, c6, c10, 2 @ PRLAR4 +#endif +#endif + ret lr +ENDPROC(__setup_pmsa_v8) + #ifdef CONFIG_SMP /* * r6: pointer at mpu_rgn_info @@ -319,6 +453,8 @@ ENTRY(__secondary_setup_mpu) and r0, r0, #(MMFR0_PMSA) @ PMSA field teq r0, #(MMFR0_PMSAv7) @ PMSA v7 beq __secondary_setup_pmsa_v7 + teq r0, #(MMFR0_PMSAv8) @ PMSA v8 + beq __secondary_setup_pmsa_v8 b __error_p ENDPROC(__secondary_setup_mpu) @@ -361,6 +497,33 @@ ENTRY(__secondary_setup_pmsa_v7) ret lr ENDPROC(__secondary_setup_pmsa_v7) +ENTRY(__secondary_setup_pmsa_v8) + ldr r4, [r6, #MPU_RNG_INFO_USED] +#ifndef CONFIG_XIP_KERNEL + add r4, r4, #1 +#endif + mov r5, #MPU_RNG_SIZE + add r3, r6, #MPU_RNG_INFO_RNGS + mla r3, r4, r5, r3 + +1: + sub r3, r3, #MPU_RNG_SIZE + sub r4, r4, #1 + + mcr p15, 0, r4, c6, c2, 1 @ PRSEL + isb + + ldr r5, [r3, #MPU_RGN_PRBAR] + ldr r6, [r3, #MPU_RGN_PRLAR] + + mcr p15, 0, r5, c6, c3, 0 @ PRBAR + mcr p15, 0, r6, c6, c3, 1 @ PRLAR + + cmp r4, #0 + bgt 1b + + ret lr +ENDPROC(__secondary_setup_pmsa_v8) #endif /* CONFIG_SMP */ #endif /* CONFIG_ARM_MPU */ #include "head-common.S" diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S 
index d32f5d35f602..3593d5c1acd2 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "vmlinux.lds.h" @@ -148,6 +149,9 @@ SECTIONS __init_end = .; BSS_SECTION(0, 0, 8) +#ifdef CONFIG_ARM_MPU + . = ALIGN(PMSAv8_MINALIGN); +#endif _end = .; STABS_DEBUG diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index b77dc675ae55..23150c0f0f4d 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -54,6 +55,9 @@ SECTIONS . = ALIGN(1< +#include + +#include +#include +#include + +#include +#include + +#include "mm.h" + +#ifndef CONFIG_CPU_V7M + +#define PRSEL __ACCESS_CP15(c6, 0, c2, 1) +#define PRBAR __ACCESS_CP15(c6, 0, c3, 0) +#define PRLAR __ACCESS_CP15(c6, 0, c3, 1) + +static inline u32 prlar_read(void) +{ + return read_sysreg(PRLAR); +} + +static inline u32 prbar_read(void) +{ + return read_sysreg(PRBAR); +} + +static inline void prsel_write(u32 v) +{ + write_sysreg(v, PRSEL); +} + +static inline void prbar_write(u32 v) +{ + write_sysreg(v, PRBAR); +} + +static inline void prlar_write(u32 v) +{ + write_sysreg(v, PRLAR); +} +#else + +static inline u32 prlar_read(void) +{ + return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR); +} + +static inline u32 prbar_read(void) +{ + return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR); +} + +static inline void prsel_write(u32 v) +{ + writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR); +} + +static inline void prbar_write(u32 v) +{ + writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR); +} + +static inline void prlar_write(u32 v) +{ + writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR); +} + +#endif + +static struct range __initdata io[MPU_MAX_REGIONS]; +static struct range __initdata mem[MPU_MAX_REGIONS]; + +static unsigned int __initdata mpu_max_regions; + +static __init bool is_region_fixed(int number) +{ + switch (number) { + case PMSAv8_XIP_REGION: + case PMSAv8_KERNEL_REGION: + return true; + default: + return false; + } +} + +void __init pmsav8_adjust_lowmem_bounds(void) +{ + phys_addr_t mem_end; + struct memblock_region *reg; + bool first = true; + + for_each_memblock(memory, reg) { + if (first) { + phys_addr_t phys_offset = PHYS_OFFSET; + + /* + * Initially only use memory continuous from + * PHYS_OFFSET */ + if (reg->base != phys_offset) + panic("First memory bank must be contiguous from PHYS_OFFSET"); + mem_end = reg->base + reg->size; + first = false; + } else { + /* + * memblock auto merges contiguous blocks, remove + * all blocks afterwards in one go (we can't remove + * blocks separately while iterating) + */ + pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", + &mem_end, ®->base); + memblock_remove(reg->base, 0 - reg->base); + break; + } + } +} + +static int __init __mpu_max_regions(void) +{ + static int max_regions; + u32 mpuir; + + if (max_regions) + return max_regions; + + mpuir = read_cpuid_mputype(); + + max_regions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; + + return max_regions; +} + +static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar) +{ + if (number > mpu_max_regions + || number >= MPU_MAX_REGIONS) + return -ENOENT; + + dsb(); + prsel_write(number); + isb(); + prbar_write(bar); + prlar_write(lar); + + mpu_rgn_info.rgns[number].prbar = bar; + mpu_rgn_info.rgns[number].prlar = lar; + + mpu_rgn_info.used++; + + return 0; +} + +static int __init pmsav8_setup_ram(unsigned 
int number, phys_addr_t start,phys_addr_t end) +{ + u32 bar, lar; + + if (is_region_fixed(number)) + return -EINVAL; + + bar = start; + lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);; + + bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED; + lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN; + + return __pmsav8_setup_region(number, bar, lar); +} + +static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start,phys_addr_t end) +{ + u32 bar, lar; + + if (is_region_fixed(number)) + return -EINVAL; + + bar = start; + lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);; + + bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN; + lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN; + + return __pmsav8_setup_region(number, bar, lar); +} + +static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start,phys_addr_t end) +{ + u32 bar, lar; + + if (!is_region_fixed(number)) + return -EINVAL; + + bar = start; + lar = (end - 1) & ~(PMSAv8_MINALIGN - 1); + + bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED; + lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN; + + prsel_write(number); + isb(); + + if (prbar_read() != bar || prlar_read() != lar) + return -EINVAL; + + /* Reserved region was set up early, we just need a record for secondaries */ + mpu_rgn_info.rgns[number].prbar = bar; + mpu_rgn_info.rgns[number].prlar = lar; + + mpu_rgn_info.used++; + + return 0; +} + +#ifndef CONFIG_CPU_V7M +static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start,phys_addr_t end) +{ + u32 bar, lar; + + if (number == PMSAv8_KERNEL_REGION) + return -EINVAL; + + bar = start; + lar = (end - 1) & ~(PMSAv8_MINALIGN - 1); + + bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED; + lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN; + + return __pmsav8_setup_region(number, bar, lar); +} +#endif + +void __init pmsav8_setup(void) +{ + int i, err = 0; + int region = PMSAv8_KERNEL_REGION; + + /* How many regions are supported ? 
*/ + mpu_max_regions = __mpu_max_regions(); + + /* RAM: single chunk of memory */ + add_range(mem, ARRAY_SIZE(mem), 0, memblock.memory.regions[0].base, + memblock.memory.regions[0].base + memblock.memory.regions[0].size); + + /* IO: cover full 4G range */ + add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff); + + /* RAM and IO: exclude kernel */ + subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END)); + subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END)); + +#ifdef CONFIG_XIP_KERNEL + /* RAM and IO: exclude xip */ + subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom)); + subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom)); +#endif + +#ifndef CONFIG_CPU_V7M + /* RAM and IO: exclude vectors */ + subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE); + subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE); +#endif + /* IO: exclude RAM */ + for (i = 0; i < ARRAY_SIZE(mem); i++) + subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end); + + /* Now program MPU */ + +#ifdef CONFIG_XIP_KERNEL + /* ROM */ + err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom)); +#endif + /* Kernel */ + err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END)); + + + /* IO */ + for (i = 0; i < ARRAY_SIZE(io); i++) { + if (!io[i].end) + continue; + + err |= pmsav8_setup_io(region++, io[i].start, io[i].end); + } + + /* RAM */ + for (i = 0; i < ARRAY_SIZE(mem); i++) { + if (!mem[i].end) + continue; + + err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end); + } + + /* Vectors */ +#ifndef CONFIG_CPU_V7M + err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE); +#endif + if (err) + pr_warn("MPU region initialization failure! %d", err); + else + pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n", + mpu_rgn_info.used, mpu_max_regions); +} -- cgit v1.2.3 From 76ed0b803a2ab793a1b27d1dfe0de7955282cd34 Mon Sep 17 00:00:00 2001 From: David Rivshin Date: Wed, 25 Apr 2018 21:15:01 +0100 Subject: ARM: 8764/1: kgdb: fix NUMREGBYTES so that gdb_regs[] is the correct size NUMREGBYTES (which is used as the size for gdb_regs[]) is incorrectly based on DBG_MAX_REG_NUM instead of GDB_MAX_REGS. DBG_MAX_REG_NUM is the total number of registers, while GDB_MAX_REGS is the number of 'unsigned longs' it takes to serialize those registers. Since FP registers require 3 'unsigned longs' each, DBG_MAX_REG_NUM is smaller than GDB_MAX_REGS. This causes GDB 8.0 to give the following error on connect: "Truncated register 19 in remote 'g' packet" This also causes the register serialization/deserialization logic to overflow gdb_regs[], overwriting whatever follows.
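For a concrete sense of the mismatch, using the definitions in arch/arm/include/asm/kgdb.h (16 GP registers, 8 FP registers serialized as 3 words each, plus 2 extra registers): DBG_MAX_REG_NUM = 16 + 8 + 2 = 26, so the old NUMREGBYTES reserved 26 << 2 = 104 bytes, while GDB_MAX_REGS = 16 + 8*3 + 2 = 42 makes the serialized 'g' packet image 42 << 2 = 168 bytes - 64 bytes more than gdb_regs[] could hold.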
Fixes: 834b2964b7ab ("kgdb,arm: fix register dump") Cc: # 2.6.37+ Signed-off-by: David Rivshin Acked-by: Rabin Vincent Tested-by: Daniel Thompson Signed-off-by: Russell King --- arch/arm/include/asm/kgdb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h index 3b73fdcf3627..8de1100d1067 100644 --- a/arch/arm/include/asm/kgdb.h +++ b/arch/arm/include/asm/kgdb.h @@ -77,7 +77,7 @@ extern int kgdb_fault_expected; #define KGDB_MAX_NO_CPUS 1 #define BUFMAX 400 -#define NUMREGBYTES (DBG_MAX_REG_NUM << 2) +#define NUMREGBYTES (GDB_MAX_REGS << 2) #define NUMCRITREGBYTES (32 << 2) #define _R0 0 -- cgit v1.2.3 From db4667a800175ede09d80fbd391c2578541121dd Mon Sep 17 00:00:00 2001 From: Stefan Agner Date: Tue, 8 May 2018 22:51:50 +0100 Subject: ARM: 8768/1: uaccess: remove const to avoid duplicate specifier Some users of get_user use the macro with an argument p which is already specified as const. When using clang this leads to a duplicate specifier: CC arch/arm/kernel/process.o In file included from init/do_mounts.c:15: In file included from ./include/linux/tty.h:7: In file included from ./include/uapi/linux/termios.h:6: In file included from ./arch/arm/include/generated/uapi/asm/termios.h:1: ./include/asm-generic/termios.h:25:6: warning: duplicate 'const' declaration specifier [-Wduplicate-decl-specifier] if (get_user(tmp, &termio->c_iflag) < 0) ^ ./arch/arm/include/asm/uaccess.h:195:3: note: expanded from macro 'get_user' __get_user_check(x, p); ^ ./arch/arm/include/asm/uaccess.h:155:12: note: expanded from macro '__get_user_check' register const typeof(*(p)) __user *__p asm("r0") = (p); Remove the const attribute from the register declaration to avoid the duplicate const specifier. In a test with ptrace.c and traps.c (both using get_user with non-const arguments for p) the generated code was exactly the same. Signed-off-by: Stefan Agner Signed-off-by: Russell King --- arch/arm/include/asm/uaccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 0bf2347495f1..3d614e90c19f 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -152,7 +152,7 @@ extern int __get_user_64t_4(void *); #define __get_user_check(x, p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ - register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register typeof(*(p)) __user *__p asm("r0") = (p); \ register typeof(x) __r2 asm("r2"); \ register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ -- cgit v1.2.3 From f5683e76f35b4ec5891031b6a29036efe0a1ff84 Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 14 May 2018 14:42:32 +0100 Subject: ARM: add more CPU part numbers for Cortex and Brahma B15 CPUs Add CPU part numbers for Cortex A53, A57, A72, A73, A75 and the Broadcom Brahma B15 CPU.
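For reference, matching is done on the implementer and part-number fields of MIDR only, with the variant and revision fields masked off by ARM_CPU_PART_MASK. A rough user-space model of the decode (the helper and the sample MIDR value below are illustrative only; the kernel's read_cpuid_part() takes no argument and reads MIDR itself):

#include <stdint.h>
#include <stdio.h>

#define ARM_CPU_PART_CORTEX_A53	0x4100d030
#define ARM_CPU_PART_MASK	0xff00fff0

/* model of read_cpuid_part(): keep MIDR implementer + part number */
static uint32_t midr_part(uint32_t midr)
{
	return midr & ARM_CPU_PART_MASK;
}

int main(void)
{
	uint32_t midr = 0x410fd034;	/* hypothetical Cortex-A53 r0p4 MIDR */

	/* 0x410fd034 & 0xff00fff0 == 0x4100d030 */
	if (midr_part(midr) == ARM_CPU_PART_CORTEX_A53)
		printf("Cortex-A53\n");
	return 0;
}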
Signed-off-by: Russell King Acked-by: Florian Fainelli Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren Acked-by: Marc Zyngier --- arch/arm/include/asm/cputype.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index cb546425da8a..26021980504d 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -77,8 +77,16 @@ #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0 #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0 #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0 +#define ARM_CPU_PART_CORTEX_A53 0x4100d030 +#define ARM_CPU_PART_CORTEX_A57 0x4100d070 +#define ARM_CPU_PART_CORTEX_A72 0x4100d080 +#define ARM_CPU_PART_CORTEX_A73 0x4100d090 +#define ARM_CPU_PART_CORTEX_A75 0x4100d0a0 #define ARM_CPU_PART_MASK 0xff00fff0 +/* Broadcom cores */ +#define ARM_CPU_PART_BRAHMA_B15 0x420000f0 + /* DEC implemented cores */ #define ARM_CPU_PART_SA1100 0x4400a110 -- cgit v1.2.3 From a5b9177f69329314721aa7022b7e69dab23fa1f0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 10 May 2018 12:55:58 +0100 Subject: ARM: bugs: prepare processor bug infrastructure Prepare the processor bug infrastructure so that it can be expanded to check for per-processor bugs. Signed-off-by: Russell King Reviewed-by: Florian Fainelli Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren Acked-by: Marc Zyngier --- arch/arm/include/asm/bugs.h | 4 ++-- arch/arm/kernel/Makefile | 1 + arch/arm/kernel/bugs.c | 9 +++++++++ 3 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 arch/arm/kernel/bugs.c (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h index a97f1ea708d1..ed122d294f3f 100644 --- a/arch/arm/include/asm/bugs.h +++ b/arch/arm/include/asm/bugs.h @@ -10,10 +10,10 @@ #ifndef __ASM_BUGS_H #define __ASM_BUGS_H -#ifdef CONFIG_MMU extern void check_writebuffer_bugs(void); -#define check_bugs() check_writebuffer_bugs() +#ifdef CONFIG_MMU +extern void check_bugs(void); #else #define check_bugs() do { } while (0) #endif diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index b59ac4bf82b8..8cad59465af3 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -31,6 +31,7 @@ else obj-y += entry-armv.o endif +obj-$(CONFIG_MMU) += bugs.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_FIQ) += fiq.o fiqasm.o diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c new file mode 100644 index 000000000000..88024028bb70 --- /dev/null +++ b/arch/arm/kernel/bugs.c @@ -0,0 +1,9 @@ +// SPDX-Identifier: GPL-2.0 +#include +#include +#include + +void __init check_bugs(void) +{ + check_writebuffer_bugs(); +} -- cgit v1.2.3 From 26602161b5ba795928a5a719fe1d5d9f2ab5c3ef Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 10 May 2018 13:00:43 +0100 Subject: ARM: bugs: hook processor bug checking into SMP and suspend paths Check for CPU bugs when secondary processors are being brought online, and also when CPUs are resuming from a low power mode. This gives an opportunity to check that processor specific bug workarounds are correctly enabled for all paths that a CPU re-enters the kernel. 
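For context: the generic boot code calls check_bugs() exactly once, from start_kernel(), so turning the MMU case from a macro alias of check_writebuffer_bugs() into a real function gives a single place to hang the additional per-processor checks introduced in the patches that follow.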
Signed-off-by: Russell King Reviewed-by: Florian Fainelli Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren Acked-by: Marc Zyngier --- arch/arm/include/asm/bugs.h | 2 ++ arch/arm/kernel/bugs.c | 5 +++++ arch/arm/kernel/smp.c | 4 ++++ arch/arm/kernel/suspend.c | 2 ++ 4 files changed, 13 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h index ed122d294f3f..73a99c72a930 100644 --- a/arch/arm/include/asm/bugs.h +++ b/arch/arm/include/asm/bugs.h @@ -14,8 +14,10 @@ extern void check_writebuffer_bugs(void); #ifdef CONFIG_MMU extern void check_bugs(void); +extern void check_other_bugs(void); #else #define check_bugs() do { } while (0) +#define check_other_bugs() do { } while (0) #endif #endif diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c index 88024028bb70..16e7ba2a9cc4 100644 --- a/arch/arm/kernel/bugs.c +++ b/arch/arm/kernel/bugs.c @@ -3,7 +3,12 @@ #include #include +void check_other_bugs(void) +{ +} + void __init check_bugs(void) { check_writebuffer_bugs(); + check_other_bugs(); } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 2da087926ebe..5ad0b67b9e33 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -405,6 +406,9 @@ asmlinkage void secondary_start_kernel(void) * before we continue - which happens after __cpu_up returns. */ set_cpu_online(cpu, true); + + check_other_bugs(); + complete(&cpu_running); local_irq_enable(); diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c index a40ebb7c0896..d08099269e35 100644 --- a/arch/arm/kernel/suspend.c +++ b/arch/arm/kernel/suspend.c @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -36,6 +37,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) cpu_switch_mm(mm->pgd, mm); local_flush_bp_all(); local_flush_tlb_all(); + check_other_bugs(); } return ret; -- cgit v1.2.3 From 9d3a04925deeabb97c8e26d940b501a2873e8af3 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 10 May 2018 13:07:29 +0100 Subject: ARM: bugs: add support for per-processor bug checking Add support for per-processor bug checking - each processor function descriptor gains a function pointer for this check, which must not be an __init function. If non-NULL, this will be called whenever a CPU enters the kernel via which ever path (boot CPU, secondary CPU startup, CPU resuming, etc.) This allows processor specific bug checks to validate that workaround bits are properly enabled by firmware via all entry paths to the kernel. 
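The reason both paths need the hook: the boot-time check_bugs() runs only on the boot CPU, while a secondary coming up through secondary_start_kernel() or a CPU returning from a low-power state through cpu_suspend() may have been (re)initialized by firmware that did not apply the same workaround state, so each re-entry path gets its own check_other_bugs() call.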
Signed-off-by: Russell King Reviewed-by: Florian Fainelli Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren Acked-by: Marc Zyngier --- arch/arm/include/asm/proc-fns.h | 4 ++++ arch/arm/kernel/bugs.c | 4 ++++ arch/arm/mm/proc-macros.S | 3 ++- 3 files changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index f2e1af45bd6f..e25f4392e1b2 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -36,6 +36,10 @@ extern struct processor { * Set up any processor specifics */ void (*_proc_init)(void); + /* + * Check for processor bugs + */ + void (*check_bugs)(void); /* * Disable any processor specifics */ diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c index 16e7ba2a9cc4..7be511310191 100644 --- a/arch/arm/kernel/bugs.c +++ b/arch/arm/kernel/bugs.c @@ -5,6 +5,10 @@ void check_other_bugs(void) { +#ifdef MULTI_CPU + if (processor.check_bugs) + processor.check_bugs(); +#endif } void __init check_bugs(void) diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index f10e31d0730a..81d0efb055c6 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -273,13 +273,14 @@ mcr p15, 0, ip, c7, c10, 4 @ data write barrier .endm -.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0 +.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 .type \name\()_processor_functions, #object .align 2 ENTRY(\name\()_processor_functions) .word \dabort .word \pabort .word cpu_\name\()_proc_init + .word \bugs .word cpu_\name\()_proc_fin .word cpu_\name\()_reset .word cpu_\name\()_do_idle -- cgit v1.2.3 From f5fe12b1eaee220ce62ff9afb8b90929c396595f Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 14 May 2018 14:20:21 +0100 Subject: ARM: spectre-v2: harden user aborts in kernel space In order to prevent aliasing attacks on the branch predictor, invalidate the BTB or instruction cache on CPUs that are known to be affected when taking an abort on an address that is outside of a user task limit: Cortex A8, A9, A12, A17, A73, A75: flush BTB. Cortex A15, Brahma B15: invalidate icache. If the IBE bit is not set, then there is little point in enabling the workaround. Signed-off-by: Russell King Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren --- arch/arm/include/asm/cp15.h | 3 ++ arch/arm/include/asm/system_misc.h | 15 ++++++++ arch/arm/mm/fault.c | 3 ++ arch/arm/mm/proc-v7-bugs.c | 73 +++++++++++++++++++++++++++++++++++--- arch/arm/mm/proc-v7.S | 8 +++-- 5 files changed, 94 insertions(+), 8 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h index 4c9fa72b59f5..07e27f212dc7 100644 --- a/arch/arm/include/asm/cp15.h +++ b/arch/arm/include/asm/cp15.h @@ -65,6 +65,9 @@ #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) #define write_sysreg(v, ...)
__write_sysreg(v, __VA_ARGS__) +#define BPIALL __ACCESS_CP15(c7, 0, c5, 6) +#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0) + extern unsigned long cr_alignment; /* defined in entry-armv.S */ static inline unsigned long get_cr(void) diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h index 78f6db114faf..8e76db83c498 100644 --- a/arch/arm/include/asm/system_misc.h +++ b/arch/arm/include/asm/system_misc.h @@ -8,6 +8,7 @@ #include #include #include +#include extern void cpu_init(void); @@ -15,6 +16,20 @@ void soft_restart(unsigned long); extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); extern void (*arm_pm_idle)(void); +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +typedef void (*harden_branch_predictor_fn_t)(void); +DECLARE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); +static inline void harden_branch_predictor(void) +{ + harden_branch_predictor_fn_t fn = per_cpu(harden_branch_predictor_fn, + smp_processor_id()); + if (fn) + fn(); +} +#else +#define harden_branch_predictor() do { } while (0) +#endif + #define UDBG_UNDEFINED (1 << 0) #define UDBG_SYSCALL (1 << 1) #define UDBG_BADABORT (1 << 2) diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index b75eada23d0a..3b1ba003c4f9 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -163,6 +163,9 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, { struct siginfo si; + if (addr > TASK_SIZE) + harden_branch_predictor(); + #ifdef CONFIG_DEBUG_USER if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) || ((user_debug & UDBG_BUS) && (sig == SIGBUS))) { diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index e46557db6446..85a2e3d6263c 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -2,7 +2,61 @@ #include #include -static __maybe_unused void cpu_v7_check_auxcr_set(bool *warned, +#include +#include +#include + +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR +DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn); + +static void harden_branch_predictor_bpiall(void) +{ + write_sysreg(0, BPIALL); +} + +static void harden_branch_predictor_iciallu(void) +{ + write_sysreg(0, ICIALLU); +} + +static void cpu_v7_spectre_init(void) +{ + const char *spectre_v2_method = NULL; + int cpu = smp_processor_id(); + + if (per_cpu(harden_branch_predictor_fn, cpu)) + return; + + switch (read_cpuid_part()) { + case ARM_CPU_PART_CORTEX_A8: + case ARM_CPU_PART_CORTEX_A9: + case ARM_CPU_PART_CORTEX_A12: + case ARM_CPU_PART_CORTEX_A17: + case ARM_CPU_PART_CORTEX_A73: + case ARM_CPU_PART_CORTEX_A75: + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_bpiall; + spectre_v2_method = "BPIALL"; + break; + + case ARM_CPU_PART_CORTEX_A15: + case ARM_CPU_PART_BRAHMA_B15: + per_cpu(harden_branch_predictor_fn, cpu) = + harden_branch_predictor_iciallu; + spectre_v2_method = "ICIALLU"; + break; + } + if (spectre_v2_method) + pr_info("CPU%u: Spectre v2: using %s workaround\n", + smp_processor_id(), spectre_v2_method); +} +#else +static void cpu_v7_spectre_init(void) +{ +} +#endif + +static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned, u32 mask, const char *msg) { u32 aux_cr; @@ -13,24 +67,33 @@ static __maybe_unused void cpu_v7_check_auxcr_set(bool *warned, if (!*warned) pr_err("CPU%u: %s", smp_processor_id(), msg); *warned = true; + return false; } + return true; } static DEFINE_PER_CPU(bool, spectre_warned); -static void check_spectre_auxcr(bool *warned, u32 bit) +static bool check_spectre_auxcr(bool *warned, u32 
bit) { - if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) && + return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) && cpu_v7_check_auxcr_set(warned, bit, "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n"); } void cpu_v7_ca8_ibe(void) { - check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)); + if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6))) + cpu_v7_spectre_init(); } void cpu_v7_ca15_ibe(void) { - check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)); + if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0))) + cpu_v7_spectre_init(); +} + +void cpu_v7_bugs_init(void) +{ + cpu_v7_spectre_init(); } diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index fa9214036fb3..79510011e7eb 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -532,8 +532,10 @@ __v7_setup_stack: __INITDATA + .weak cpu_v7_bugs_init + @ define struct processor (see and proc-macros.S) - define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 + define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR @ generic v7 bpiall on context switch @@ -548,7 +550,7 @@ __v7_setup_stack: globl_equ cpu_v7_bpiall_do_suspend, cpu_v7_do_suspend globl_equ cpu_v7_bpiall_do_resume, cpu_v7_do_resume #endif - define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 + define_processor_functions v7_bpiall, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init #define HARDENED_BPIALL_PROCESSOR_FUNCTIONS v7_bpiall_processor_functions #else @@ -584,7 +586,7 @@ __v7_setup_stack: globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm #endif globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext - define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 + define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1, bugs=cpu_v7_bugs_init #endif @ Cortex-A15 - needs iciallu switch_mm for hardening -- cgit v1.2.3 From 3f7e8e2e1ebda787f156ce46e3f0a9ce2833fa4f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 1 Feb 2018 11:07:35 +0000 Subject: ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17 In order to avoid aliasing attacks against the branch predictor, let's invalidate the BTB on guest exit. This is made complicated by the fact that we cannot take a branch before invalidating the BTB. We only apply this to A12 and A17, which are the only two ARM cores on which this is useful.
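A note on the SP trick used below, since it is easy to misread: an exception that enters at the vector slot commented n falls through the remaining "add sp, sp, #1" instructions, leaving SP incremented by exactly n. Because SP is guaranteed 8-byte aligned at this point, its bottom 3 bits now encode the exception type, which the vect_br macro recovers (via eor/tst on ARM, or the r1 scratch copy on Thumb-2) while restoring SP - all without executing a branch before the BPIALL.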
Signed-off-by: Marc Zyngier Signed-off-by: Russell King Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren --- arch/arm/include/asm/kvm_asm.h | 2 -- arch/arm/include/asm/kvm_mmu.h | 17 +++++++++- arch/arm/kvm/hyp/hyp-entry.S | 71 ++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 85 insertions(+), 5 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 36dd2962a42d..df24ed48977d 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -61,8 +61,6 @@ struct kvm_vcpu; extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; -extern char __kvm_hyp_vector[]; - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index de1b919404e4..d08ce9c41df4 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -297,7 +297,22 @@ static inline unsigned int kvm_get_vmid_bits(void) static inline void *kvm_get_hyp_vector(void) { - return kvm_ksym_ref(__kvm_hyp_vector); + switch(read_cpuid_part()) { +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + case ARM_CPU_PART_CORTEX_A12: + case ARM_CPU_PART_CORTEX_A17: + { + extern char __kvm_hyp_vector_bp_inv[]; + return kvm_ksym_ref(__kvm_hyp_vector_bp_inv); + } + +#endif + default: + { + extern char __kvm_hyp_vector[]; + return kvm_ksym_ref(__kvm_hyp_vector); + } + } } static inline int kvm_map_vectors(void) diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S index 95a2faefc070..e789f52a5129 100644 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -71,6 +71,66 @@ __kvm_hyp_vector: W(b) hyp_irq W(b) hyp_fiq +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + .align 5 +__kvm_hyp_vector_bp_inv: + .global __kvm_hyp_vector_bp_inv + + /* + * We encode the exception entry in the bottom 3 bits of + * SP, and we have to guarantee to be 8 bytes aligned. + */ + W(add) sp, sp, #1 /* Reset 7 */ + W(add) sp, sp, #1 /* Undef 6 */ + W(add) sp, sp, #1 /* Syscall 5 */ + W(add) sp, sp, #1 /* Prefetch abort 4 */ + W(add) sp, sp, #1 /* Data abort 3 */ + W(add) sp, sp, #1 /* HVC 2 */ + W(add) sp, sp, #1 /* IRQ 1 */ + W(nop) /* FIQ 0 */ + + mcr p15, 0, r0, c7, c5, 6 /* BPIALL */ + isb + +#ifdef CONFIG_THUMB2_KERNEL + /* + * Yet another silly hack: Use VPIDR as a temp register. + * Thumb2 is really a pain, as SP cannot be used with most + * of the bitwise instructions. The vect_br macro ensures + * things gets cleaned-up. + */ + mcr p15, 4, r0, c0, c0, 0 /* VPIDR */ + mov r0, sp + and r0, r0, #7 + sub sp, sp, r0 + push {r1, r2} + mov r1, r0 + mrc p15, 4, r0, c0, c0, 0 /* VPIDR */ + mrc p15, 0, r2, c0, c0, 0 /* MIDR */ + mcr p15, 4, r2, c0, c0, 0 /* VPIDR */ +#endif + +.macro vect_br val, targ +ARM( eor sp, sp, #\val ) +ARM( tst sp, #7 ) +ARM( eorne sp, sp, #\val ) + +THUMB( cmp r1, #\val ) +THUMB( popeq {r1, r2} ) + + beq \targ +.endm + + vect_br 0, hyp_fiq + vect_br 1, hyp_irq + vect_br 2, hyp_hvc + vect_br 3, hyp_dabt + vect_br 4, hyp_pabt + vect_br 5, hyp_svc + vect_br 6, hyp_undef + vect_br 7, hyp_reset +#endif + .macro invalid_vector label, cause .align \label: mov r0, #\cause @@ -149,7 +209,14 @@ hyp_hvc: bx ip 1: - push {lr} + /* + * Pushing r2 here is just a way of keeping the stack aligned to + * 8 bytes on any path that can trigger a HYP exception. 
Here, + * we may well be about to jump into the guest, and the guest + * exit would otherwise be badly decoded by our fancy + * "decode-exception-without-a-branch" code... + */ + push {r2, lr} mov lr, r0 mov r0, r1 @@ -159,7 +226,7 @@ hyp_hvc: THUMB( orr lr, #1) blx lr @ Call the HYP function - pop {lr} + pop {r2, lr} eret guest_trap: -- cgit v1.2.3 From 0c47ac8cd157727e7a532d665d6fb1b5fd333977 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 1 Feb 2018 11:07:38 +0000 Subject: ARM: KVM: invalidate icache on guest exit for Cortex-A15 In order to avoid aliasing attacks against the branch predictor on Cortex-A15, let's invalidate the BTB on guest exit, which can only be done by invalidating the icache (with ACTLR[0] being set). We use the same hack as for A12/A17 to perform the vector decoding. Signed-off-by: Marc Zyngier Signed-off-by: Russell King Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren --- arch/arm/include/asm/kvm_mmu.h | 5 +++++ arch/arm/kvm/hyp/hyp-entry.S | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index d08ce9c41df4..48edb1f4ced4 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -306,6 +306,11 @@ static inline void *kvm_get_hyp_vector(void) return kvm_ksym_ref(__kvm_hyp_vector_bp_inv); } + case ARM_CPU_PART_CORTEX_A15: + { + extern char __kvm_hyp_vector_ic_inv[]; + return kvm_ksym_ref(__kvm_hyp_vector_ic_inv); + } #endif default: { diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S index e789f52a5129..918a05dd2d63 100644 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -72,6 +72,28 @@ __kvm_hyp_vector: W(b) hyp_fiq #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + .align 5 +__kvm_hyp_vector_ic_inv: + .global __kvm_hyp_vector_ic_inv + + /* + * We encode the exception entry in the bottom 3 bits of + * SP, and we have to guarantee to be 8 bytes aligned. + */ + W(add) sp, sp, #1 /* Reset 7 */ + W(add) sp, sp, #1 /* Undef 6 */ + W(add) sp, sp, #1 /* Syscall 5 */ + W(add) sp, sp, #1 /* Prefetch abort 4 */ + W(add) sp, sp, #1 /* Data abort 3 */ + W(add) sp, sp, #1 /* HVC 2 */ + W(add) sp, sp, #1 /* IRQ 1 */ + W(nop) /* FIQ 0 */ + + mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */ + isb + + b decode_vectors + .align 5 __kvm_hyp_vector_bp_inv: .global __kvm_hyp_vector_bp_inv @@ -92,6 +114,8 @@ __kvm_hyp_vector_bp_inv: mcr p15, 0, r0, c7, c5, 6 /* BPIALL */ isb +decode_vectors: + #ifdef CONFIG_THUMB2_KERNEL /* * Yet another silly hack: Use VPIDR as a temp register. -- cgit v1.2.3 From 3c908e16396d130608e831b7fac4b167a2ede6ba Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 10 May 2018 17:52:18 +0100 Subject: ARM: spectre-v2: KVM: invalidate icache on guest exit for Brahma B15 Include Brahma B15 in the Spectre v2 KVM workarounds. 
Signed-off-by: Russell King Acked-by: Florian Fainelli Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren Acked-by: Marc Zyngier --- arch/arm/include/asm/kvm_mmu.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 48edb1f4ced4..fea770f78144 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h @@ -306,6 +306,7 @@ static inline void *kvm_get_hyp_vector(void) return kvm_ksym_ref(__kvm_hyp_vector_bp_inv); } + case ARM_CPU_PART_BRAHMA_B15: case ARM_CPU_PART_CORTEX_A15: { extern char __kvm_hyp_vector_ic_inv[]; -- cgit v1.2.3 From add5609877c6785cc002c6ed7e008b1d61064439 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 16 May 2018 11:29:30 +0100 Subject: ARM: KVM: report support for SMCCC_ARCH_WORKAROUND_1 Report support for SMCCC_ARCH_WORKAROUND_1 to KVM guests for affected CPUs. Signed-off-by: Russell King Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren Reviewed-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 248b930563e5..11f91744ffb0 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -311,8 +312,17 @@ static inline void kvm_arm_vhe_guest_exit(void) {} static inline bool kvm_arm_harden_branch_predictor(void) { - /* No way to detect it yet, pretend it is not there. */ - return false; + switch(read_cpuid_part()) { +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR + case ARM_CPU_PART_BRAHMA_B15: + case ARM_CPU_PART_CORTEX_A12: + case ARM_CPU_PART_CORTEX_A15: + case ARM_CPU_PART_CORTEX_A17: + return true; +#endif + default: + return false; + } } #endif /* __ARM_KVM_HOST_H__ */ -- cgit v1.2.3 From a78d156587931a2c3b354534aa772febf6c9e855 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 11 May 2018 11:15:29 +0100 Subject: ARM: spectre-v1: add speculation barrier (csdb) macros Add assembly and C macros for the new CSDB instruction. Signed-off-by: Russell King Acked-by: Mark Rutland Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren --- arch/arm/include/asm/assembler.h | 8 ++++++++ arch/arm/include/asm/barrier.h | 13 +++++++++++++ 2 files changed, 21 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index bc8d4bbd82e2..ef1386b1af9b 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -447,6 +447,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .size \name , . 
- \name .endm + .macro csdb +#ifdef CONFIG_THUMB2_KERNEL + .inst.w 0xf3af8014 +#else + .inst 0xe320f014 +#endif + .endm + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req #ifndef CONFIG_CPU_USE_DOMAINS adds \tmp, \addr, #\size - 1 diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 40f5c410fd8c..3d9c1d4b7e75 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -17,6 +17,12 @@ #define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory") #define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory") #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") +#ifdef CONFIG_THUMB2_KERNEL +#define CSDB ".inst.w 0xf3af8014" +#else +#define CSDB ".inst 0xe320f014" +#endif +#define csdb() __asm__ __volatile__(CSDB : : : "memory") #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 #define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ : : "r" (0) : "memory") @@ -37,6 +43,13 @@ #define dmb(x) __asm__ __volatile__ ("" : : : "memory") #endif +#ifndef CSDB +#define CSDB +#endif +#ifndef csdb +#define csdb() +#endif + #ifdef CONFIG_ARM_HEAVY_MB extern void (*soc_mb)(void); extern void arm_heavy_mb(void); -- cgit v1.2.3 From 1d4238c56f9816ce0f9c8dbe42d7f2ad81cb6613 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 11 May 2018 15:06:58 +0100 Subject: ARM: spectre-v1: add array_index_mask_nospec() implementation Add an implementation of the array_index_mask_nospec() function for mitigating Spectre variant 1 throughout the kernel. Signed-off-by: Russell King Acked-by: Mark Rutland Boot-tested-by: Tony Lindgren Reviewed-by: Tony Lindgren --- arch/arm/include/asm/barrier.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 3d9c1d4b7e75..69772e742a0a 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -76,6 +76,25 @@ extern void arm_heavy_mb(void); #define __smp_rmb() __smp_mb() #define __smp_wmb() dmb(ishst) +#ifdef CONFIG_CPU_SPECTRE +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( + "cmp %1, %2\n" + " sbc %0, %1, %1\n" + CSDB + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc"); + + return mask; +} +#define array_index_mask_nospec array_index_mask_nospec +#endif + #include #endif /* !__ASSEMBLY__ */ -- cgit v1.2.3
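For completeness, the asm works as follows: "cmp %1, %2" sets the carry flag only when idx >= sz (unsigned), and "sbc %0, %1, %1" computes idx - idx - !C, yielding an all-ones mask when idx < sz and zero otherwise; the trailing csdb keeps speculation from running ahead of the mask. A sketch of the intended use via the generic helper in include/linux/nospec.h (the table and accessor below are hypothetical):

#include <linux/kernel.h>
#include <linux/nospec.h>

static unsigned long table[8];

unsigned long table_read(unsigned long idx)
{
	if (idx >= ARRAY_SIZE(table))
		return 0;

	/* clamp a mispredicted out-of-bounds idx to 0 without a branch */
	idx = array_index_nospec(idx, ARRAY_SIZE(table));
	return table[idx];
}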