author    | Alexandre Ghiti <alex@ghiti.fr>           | 2021-06-24 15:00:41 +0300
committer | Palmer Dabbelt <palmerdabbelt@google.com> | 2021-07-01 07:18:58 +0300
commit    | e5c35fa0401971701dcd7675f471b664698244dd (patch)
tree      | 6f42965bac8ba06c33fa462472f852d92be187cd /arch/riscv/include/asm/page.h
parent    | c10bc260e7c030364b5150aac7ebf048ddfb9502 (diff)
download  | linux-e5c35fa0401971701dcd7675f471b664698244dd.tar.xz
riscv: Map the kernel with correct permissions the first time
For 64-bit kernels, we map the entire kernel with write and execute
permissions and afterwards remove writability from text and executability
from data.
For 32-bit kernels, the kernel mapping resides in the linear mapping, so we
map the entire linear mapping as writable and executable and afterwards
remove those properties from unused memory and from the kernel mapping as
described above.
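For reference, the old flow boils down to roughly the sketch below
(illustrative only: the function name tighten_kernel_permissions() is
hypothetical and the real riscv code also deals with init sections and the
linear map; it leans on the generic set_memory_ro()/set_memory_nx() helpers
and the linker-provided section symbols):

#include <asm/sections.h>
#include <asm/set_memory.h>

/* Everything was first mapped RWX; tighten the permissions afterwards. */
static void __init tighten_kernel_permissions(void)
{
	unsigned long text  = (unsigned long)_stext;
	unsigned long etext = (unsigned long)_etext;
	unsigned long data  = (unsigned long)_sdata;
	unsigned long end   = (unsigned long)_end;

	/* Kernel text: keep executable, drop writability. */
	set_memory_ro(text, (etext - text) >> PAGE_SHIFT);
	/* Kernel data: keep writable, drop executability. */
	set_memory_nx(data, (end - data) >> PAGE_SHIFT);
}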
Change this behavior to map the kernel with the correct permissions directly,
avoiding the extra pass over the whole mapping just to fix up the permissions.
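In sketch form, the permissions are now derived from the virtual address
while the page tables are built, so no later fixup pass is needed (the
helper name pgprot_from_va() and the exact region checks below are a
simplified illustration, not the patch itself):

#include <asm/pgtable.h>
#include <asm/sections.h>

/* Pick the final protection bits for a kernel-image virtual address. */
static pgprot_t __init pgprot_from_va(uintptr_t va)
{
	if (va >= (uintptr_t)_stext && va < (uintptr_t)_etext)
		return PAGE_KERNEL_READ_EXEC;	/* text: readable + executable */
	if (va >= (uintptr_t)__start_rodata && va < (uintptr_t)__end_rodata)
		return PAGE_KERNEL_READ;	/* rodata: read-only */
	return PAGE_KERNEL;			/* data/bss: read-write, no exec */
}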
At the same time, this fixes an issue introduced by commit 2bfc6cd81bd1
("riscv: Move kernel mapping outside of linear mapping") as reported at
https://github.com/starfive-tech/linux/issues/17.
Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
Diffstat (limited to 'arch/riscv/include/asm/page.h')
-rw-r--r-- | arch/riscv/include/asm/page.h | 13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 6e004d8fda4d..349e4f9874cc 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -95,6 +95,7 @@ extern unsigned long va_kernel_pa_offset;
 #endif
 extern unsigned long va_kernel_xip_pa_offset;
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #else
 #define va_pa_offset	0
@@ -108,6 +109,11 @@ extern unsigned long pfn_base;
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
 #define kernel_mapping_pa_to_va(y)	({						\
 	unsigned long _y = y;								\
@@ -127,10 +133,15 @@ extern unsigned long kernel_virt_addr;
 
 #define __va_to_pa_nodebug(x)	({						\
 	unsigned long _x = x;							\
-	(_x < kernel_virt_addr) ?						\
+	is_linear_mapping(_x) ?							\
 		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
 	})
 #else
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x)	((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x)	((unsigned long)(x) - va_pa_offset)
 #endif /* CONFIG_64BIT */
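The two new predicates make the split of the kernel virtual address space
explicit: on 64-bit the kernel image sits in its own mapping above the
linear map, so address conversion has to pick the matching offset. A
minimal usage sketch (illustrative only; XIP_OFFSET handling is omitted and
the function example_va_to_pa() is hypothetical, not part of the patch):

#include <linux/bug.h>
#include <asm/page.h>

/* Convert a directly mapped kernel VA to a PA using the new predicates. */
static phys_addr_t example_va_to_pa(unsigned long va)
{
	if (is_linear_mapping(va))	/* PAGE_OFFSET <= va < kernel_virt_addr */
		return va - va_pa_offset;
	if (is_kernel_mapping(va))	/* kernel_virt_addr <= va < kernel_virt_addr + load_sz */
		return va - va_kernel_pa_offset;
	BUG();				/* neither linearly nor kernel mapped */
	return 0;
}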