author		Helge Deller <deller@gmx.de>	2019-05-10 18:00:01 +0300
committer	Helge Deller <deller@gmx.de>	2019-05-10 22:00:44 +0300
commit		8d0e051cc75e2b1a7e2fd51fc56af332c9619618 (patch)
tree		ccf08363081a35a3cd8fb4ada4b62cc5fefb1840
parent		e6eb5fe9123f05dcbf339ae5c0b6d32fcc0685d5 (diff)
download	linux-8d0e051cc75e2b1a7e2fd51fc56af332c9619618.tar.xz
parisc: Enable the ro_after_init feature
This patch modifies the initial page mapping functions in the following way:
During bootup, the init, text and data pages will be mapped RWX and,
if supported, with huge pages.
At the final stage of bootup, the kernel calls free_initmem(), and then all
pages will be remapped either R-X (for text and read-only data) or RW- (for
data). The __init pages will be dropped.
This reflects the behaviour of the x86 platform.
Signed-off-by: Helge Deller <deller@gmx.de>
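[Editor's note: as a quick orientation for the map_pages() rework in the diff below, here is a minimal user-space sketch of the per-page protection choice. The prot_t enum and the boundary addresses are illustrative assumptions standing in for the kernel's pgprot values and linker symbols; only the branch structure mirrors the patch.]

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the kernel's pgprot values (assumption, for illustration) */
typedef enum { PROT_KERNEL, PROT_KERNEL_RWX, PROT_KERNEL_EXEC } prot_t;

static bool kernel_set_to_readonly;	/* flipped in free_initmem() */

/* illustrative layout: [__init_begin][_text ... data_start ... _end] */
static const unsigned long kernel_start = 0x100000;	/* __init_begin (assumed) */
static const unsigned long ro_start     = 0x200000;	/* _text (assumed) */
static const unsigned long ro_end       = 0x400000;	/* data_start (assumed) */
static const unsigned long kernel_end   = 0x800000;	/* _end (assumed) */

/* mirrors the if/else chain the patch introduces in map_pages() */
static prot_t page_prot(unsigned long address, bool *huge)
{
	*huge = false;
	if (address < kernel_start || address >= kernel_end)
		return PROT_KERNEL;		/* outside kernel memory */
	if (!kernel_set_to_readonly) {
		*huge = true;			/* still booting: RWX, huge */
		return PROT_KERNEL_RWX;
	}
	if (address >= ro_start) {		/* text R-X, data RW- */
		*huge = true;
		return address < ro_end ? PROT_KERNEL_EXEC : PROT_KERNEL;
	}
	return PROT_KERNEL;			/* former init area */
}

int main(void)
{
	bool huge;
	unsigned long text = 0x300000, data = 0x500000;

	printf("boot: text prot=%d\n", page_prot(text, &huge));	/* RWX */
	kernel_set_to_readonly = true;		/* as free_initmem() does */
	printf("late: text prot=%d\n", page_prot(text, &huge));	/* R-X */
	printf("late: data prot=%d\n", page_prot(data, &huge));	/* RW- */
	return 0;
}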
-rw-r--r--	arch/parisc/include/asm/cache.h		3
-rw-r--r--	arch/parisc/kernel/vmlinux.lds.S	3
-rw-r--r--	arch/parisc/mm/init.c			67
3 files changed, 35 insertions, 38 deletions
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 4016fe1c65a9..73ca89a47f49 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -24,9 +24,6 @@
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
-/* Read-only memory is marked before mark_rodata_ro() is called. */
-#define __ro_after_init	__read_mostly
-
 void parisc_cache_init(void);	/* initializes cache-flushing */
 void disable_sr_hashing_asm(int); /* low level support for above */
 void disable_sr_hashing(void);   /* turns off space register hashing */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index a8be7a47fcc0..c3b1b9c24ede 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -18,9 +18,6 @@
 	*(.data..vm0.pgd) \
 	*(.data..vm0.pte)
 
-/* No __ro_after_init data in the .rodata section - which will always be ro */
-#define RO_AFTER_INIT_DATA
-
 #include <asm-generic/vmlinux.lds.h>
 
 /* needed for the processor specific cache alignment size */
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3b0f9eab7f2c..b99bcbf1ecdb 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -345,16 +345,7 @@ static void __init setup_bootmem(void)
 	memblock_dump_all();
 }
 
-static int __init parisc_text_address(unsigned long vaddr)
-{
-	static unsigned long head_ptr __initdata;
-
-	if (!head_ptr)
-		head_ptr = PAGE_MASK & (unsigned long)
-			dereference_function_descriptor(&parisc_kernel_start);
-
-	return core_kernel_text(vaddr) || vaddr == head_ptr;
-}
+static bool kernel_set_to_readonly;
 
 static void __init map_pages(unsigned long start_vaddr,
 			     unsigned long start_paddr, unsigned long size,
@@ -372,10 +363,11 @@ static void __init map_pages(unsigned long start_vaddr,
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long kernel_end;
+	unsigned long kernel_start, kernel_end;
 
 	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
+	kernel_start = __pa((unsigned long)&__init_begin);
 	kernel_end  = __pa((unsigned long)&_end);
 
 	end_paddr = start_paddr + size;
@@ -438,26 +430,30 @@ static void __init map_pages(unsigned long start_vaddr,
 			pg_table = (pte_t *) __va(pg_table) + start_pte;
 			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 				pte_t pte;
-
-				if (force)
-					pte = __mk_pte(address, pgprot);
-				else if (parisc_text_address(vaddr)) {
-					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
-					if (address >= ro_start && address < kernel_end)
-						pte = pte_mkhuge(pte);
+				pgprot_t prot;
+				bool huge = false;
+
+				if (force) {
+					prot = pgprot;
+				} else if (address < kernel_start || address >= kernel_end) {
+					/* outside kernel memory */
+					prot = PAGE_KERNEL;
+				} else if (!kernel_set_to_readonly) {
+					/* still initializing, allow writing to RO memory */
+					prot = PAGE_KERNEL_RWX;
+					huge = true;
+				} else if (address >= ro_start) {
+					/* Code (ro) and Data areas */
+					prot = (address < ro_end) ?
+						PAGE_KERNEL_EXEC : PAGE_KERNEL;
+					huge = true;
+				} else {
+					prot = PAGE_KERNEL;
 				}
-				else
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-				if (address >= ro_start && address < ro_end) {
-					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+
+				pte = __mk_pte(address, prot);
+				if (huge)
 					pte = pte_mkhuge(pte);
-				} else
-#endif
-				{
-					pte = __mk_pte(address, pgprot);
-					if (address >= ro_start && address < kernel_end)
-						pte = pte_mkhuge(pte);
-				}
 
 				if (address >= end_paddr)
 					break;
@@ -493,6 +489,12 @@ void __ref free_initmem(void)
 {
 	unsigned long init_begin = (unsigned long)__init_begin;
 	unsigned long init_end = (unsigned long)__init_end;
+	unsigned long kernel_end  = (unsigned long)&_end;
+
+	/* Remap kernel text and data, but do not touch init section yet. */
+	kernel_set_to_readonly = true;
+	map_pages(init_end, __pa(init_end), kernel_end - init_end,
+		  PAGE_KERNEL, 0);
 
 	/* The init text pages are marked R-X.  We have to
 	 * flush the icache and mark them RW-
@@ -509,7 +511,7 @@ void __ref free_initmem(void)
 			PAGE_KERNEL, 1);
 
 	/* force the kernel to see the new TLB entries */
-	__flush_tlb_range(0, init_begin, init_end);
+	__flush_tlb_range(0, init_begin, kernel_end);
 
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
@@ -527,8 +529,9 @@
 void mark_rodata_ro(void)
 {
 	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
-	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-		(unsigned long)(__end_rodata - __start_rodata) >> 10);
+	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
+
+	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
 }
 #endif
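[Editor's note: the free_initmem() changes above amount to a fixed call ordering. The sketch below illustrates only that sequence; map_pages() and flush_tlb_range() here are printf stubs (the latter a simplified stand-in for __flush_tlb_range(0, start, end)), and all addresses are made up for illustration.]

#include <stdbool.h>
#include <stdio.h>

static bool kernel_set_to_readonly;

/* stub standing in for the kernel's map_pages() named in the diff above */
static void map_pages(unsigned long vaddr, unsigned long paddr,
		      unsigned long size, int prot, int force)
{
	printf("remap [%#lx, %#lx) prot=%d force=%d\n",
	       vaddr, vaddr + size, prot, force);
}

/* simplified stand-in for __flush_tlb_range(0, start, end) */
static void flush_tlb_range(unsigned long start, unsigned long end)
{
	printf("flush TLB [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	/* illustrative addresses; real values come from linker symbols */
	unsigned long init_begin = 0x100000, init_end = 0x200000;
	unsigned long kernel_end = 0x800000;

	/* 1. flip the flag so the real map_pages() picks final protections */
	kernel_set_to_readonly = true;

	/* 2. remap text+data, i.e. everything after the init section */
	map_pages(init_end, init_end, kernel_end - init_end, /*PAGE_KERNEL*/0, 0);

	/* 3. remap the init pages RW- so they can be freed (force=1) */
	map_pages(init_begin, init_begin, init_end - init_begin, 0, 1);

	/* 4. flush stale TLB entries over the whole remapped range,
	 *    which is why the flush end moved from init_end to kernel_end */
	flush_tlb_range(init_begin, kernel_end);
	return 0;
}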