author    Alexander Gordeev <agordeev@linux.ibm.com>  2024-03-22 16:39:57 +0300
committer Alexander Gordeev <agordeev@linux.ibm.com>  2024-04-17 14:38:02 +0300
commit    56b1069c40c777e9cba595a62857293628067d65
tree      28cdfa38027fbc9e156b421582fa00620ca15a2b /arch/s390/boot
parent    54f2ecc3188f78723267826f634e0747169f8685
download  linux-56b1069c40c777e9cba595a62857293628067d65.tar.xz
s390/boot: Rework deployment of the kernel image

Rework deployment of the kernel image for both the compressed and
uncompressed variants, as selected by the CONFIG_KERNEL_UNCOMPRESSED
kernel configuration option.

In case CONFIG_KERNEL_UNCOMPRESSED is disabled, avoid uncompressing
the kernel to a temporary buffer and then copying it to the target
address. Instead, uncompress it directly to the target destination.

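For illustration, the compressed variant of deploy_kernel() introduced
by this patch (see the decompressor.c hunk below) hands the final
target address straight to the decompressor:

	void deploy_kernel(void *output)
	{
		__decompress(_compressed_start, _compressed_end - _compressed_start,
			     NULL, NULL, output, vmlinux.image_size, NULL, error);
	}
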
In case CONFIG_KERNEL_UNCOMPRESSED is enabled, avoid moving the
kernel to the default 0x100000 location when KASLR is disabled or
has failed. Instead, use the uncompressed kernel image directly.

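In that configuration deploy_kernel() reduces to an optional move of
the built-in image (see the startup.c hunk below); nothing is copied
when the image already sits at its final address:

	static void deploy_kernel(void *output)
	{
		void *uncompressed_start = (void *)_compressed_start;

		if (output == uncompressed_start)
			return;
		memmove(output, uncompressed_start, vmlinux.image_size);
		memset(uncompressed_start, 0, vmlinux.image_size);
	}
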
In case KASLR is disabled or has failed, the .amode31 section
location in memory is not randomized and precedes the kernel image.
In case CONFIG_KERNEL_UNCOMPRESSED is disabled, that location
overlaps the area used by the decompression algorithm. That is fine,
since that area is no longer needed once decompression has finished,
and the size of the .amode31 section is never expected to exceed
BOOT_HEAP_SIZE.

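A rough sketch of the resulting non-KASLR layout in the compressed
case, derived from the hunks below (boundaries approximate):

	/*
	 * 0 .. _decompressor_end                decompressor image
	 * _end .. _end + BOOT_HEAP_SIZE         decompression heap/buffer
	 * kernel_start = PAGE_ALIGN(_end + BOOT_HEAP_SIZE)
	 * kernel_start - AMODE31_SIZE           .amode31, overlapping the tail
	 *                                       of the heap, which is unused
	 *                                       once decompression is done
	 * kernel_start ..
	 * kernel_start + image_size + bss_size  uncompressed kernel image
	 */
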
There is no decompression in case CONFIG_KERNEL_UNCOMPRESSED is
enabled. Therefore, rename decompress_kernel() to deploy_kernel(),
which better describes both the uncompressed and compressed cases.

Introduce the AMODE31_SIZE macro to avoid the immediate value 0x3000
(the size of the .amode31 section) in the decompressor linker script.

Modify the vmlinux linker script to force the size of the .amode31
section to AMODE31_SIZE (the value of (_eamode31 - _samode31) could
otherwise differ as a result of the compiler options used).

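The macro definition and the vmlinux linker script live outside
arch/s390/boot and are therefore not part of the diff below;
conceptually the change amounts to something like the following
sketch (the exact file and form are assumptions; only the 0x3000
value and the _samode31/_eamode31 symbols are taken from the text
above):

	#define AMODE31_SIZE	0x3000

	/* vmlinux linker script: pin the section size independently of
	 * whatever the compiler emitted into .amode31 */
	. = _samode31 + AMODE31_SIZE;
	_eamode31 = .;
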
Introduce the __START_KERNEL macro that defines the kernel ELF image
entry point and set it to the current value of 0x100000.

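The macro itself is defined outside arch/s390/boot (assumed
definition below); with it, a linked address can be rebased onto the
chosen physical or virtual base, which is where the
"- __START_KERNEL" terms in the relocation hunks below come from:

	#define __START_KERNEL	0x100000

	/*
	 * The kernel image is linked to start at __START_KERNEL, so a
	 * linked address maps to its final location by rebasing, e.g.:
	 *
	 *	phys = __kaslr_offset_phys + (vaddr - __START_KERNEL);
	 */
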
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Diffstat (limited to 'arch/s390/boot')
-rw-r--r--	arch/s390/boot/boot.h         |  1
-rw-r--r--	arch/s390/boot/decompressor.c | 15
-rw-r--r--	arch/s390/boot/decompressor.h |  8
-rw-r--r--	arch/s390/boot/startup.c      | 90
-rw-r--r--	arch/s390/boot/vmem.c         |  7
-rw-r--r--	arch/s390/boot/vmlinux.lds.S  |  3
6 files changed, 56 insertions, 68 deletions
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 412cdd490403..85da1c6cef4f 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -17,7 +17,6 @@ struct machine_info {
 };
 
 struct vmlinux_info {
-	unsigned long default_lma;
 	unsigned long entry;
 	unsigned long image_size;	/* does not include .bss */
 	unsigned long bss_size;		/* uncompressed image .bss size */
diff --git a/arch/s390/boot/decompressor.c b/arch/s390/boot/decompressor.c
index d762733a0753..f478e8e9cbda 100644
--- a/arch/s390/boot/decompressor.c
+++ b/arch/s390/boot/decompressor.c
@@ -63,24 +63,13 @@ static unsigned long free_mem_end_ptr = (unsigned long) _end + BOOT_HEAP_SIZE;
 #include "../../../../lib/decompress_unzstd.c"
 #endif
 
-#define decompress_offset ALIGN((unsigned long)_end + BOOT_HEAP_SIZE, PAGE_SIZE)
-
 unsigned long mem_safe_offset(void)
 {
-	/*
-	 * due to 4MB HEAD_SIZE for bzip2
-	 * 'decompress_offset + vmlinux.image_size' could be larger than
-	 * kernel at final position + its .bss, so take the larger of two
-	 */
-	return max(decompress_offset + vmlinux.image_size,
-		   vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size);
+	return ALIGN(free_mem_end_ptr, PAGE_SIZE);
 }
 
-void *decompress_kernel(void)
+void deploy_kernel(void *output)
 {
-	void *output = (void *)decompress_offset;
-
 	__decompress(_compressed_start, _compressed_end - _compressed_start,
 		     NULL, NULL, output, vmlinux.image_size, NULL, error);
-	return output;
 }
diff --git a/arch/s390/boot/decompressor.h b/arch/s390/boot/decompressor.h
index 92b81d2ea35d..4f966f06bd65 100644
--- a/arch/s390/boot/decompressor.h
+++ b/arch/s390/boot/decompressor.h
@@ -2,11 +2,9 @@
 #ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
 #define BOOT_COMPRESSED_DECOMPRESSOR_H
 
-#ifdef CONFIG_KERNEL_UNCOMPRESSED
-static inline void *decompress_kernel(void) { return NULL; }
-#else
-void *decompress_kernel(void);
-#endif
+#ifndef CONFIG_KERNEL_UNCOMPRESSED
 unsigned long mem_safe_offset(void);
+void deploy_kernel(void *output);
+#endif
 
 #endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index fc9c1d51ef8d..949eff7107cc 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -109,9 +109,19 @@ static void setup_lpp(void)
 }
 
 #ifdef CONFIG_KERNEL_UNCOMPRESSED
-unsigned long mem_safe_offset(void)
+static unsigned long mem_safe_offset(void)
 {
-	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
+	return (unsigned long)_compressed_start;
+}
+
+static void deploy_kernel(void *output)
+{
+	void *uncompressed_start = (void *)_compressed_start;
+
+	if (output == uncompressed_start)
+		return;
+	memmove(output, uncompressed_start, vmlinux.image_size);
+	memset(uncompressed_start, 0, vmlinux.image_size);
 }
 #endif
 
@@ -154,18 +164,18 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
 	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
 	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
 	for (rela = rela_start; rela < rela_end; rela++) {
-		loc = rela->r_offset + phys_offset;
+		loc = rela->r_offset + phys_offset - __START_KERNEL;
 		val = rela->r_addend;
 		r_sym = ELF64_R_SYM(rela->r_info);
 		if (r_sym) {
 			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
-				val += dynsym[r_sym].st_value + offset;
+				val += dynsym[r_sym].st_value + offset - __START_KERNEL;
 		} else {
 			/*
-			 * 0 == undefined symbol table index (STN_UNDEF),
+			 * 0 == undefined symbol table index (SHN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
 			 */
-			val += offset;
+			val += offset - __START_KERNEL;
 		}
 		r_type = ELF64_R_TYPE(rela->r_info);
 		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
@@ -206,7 +216,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
 		loc = (long)*reloc + phys_offset;
 		if (loc < min_addr || loc > max_addr)
 			error("64-bit relocation outside of kernel!\n");
-		*(u64 *)loc += offset;
+		*(u64 *)loc += offset - __START_KERNEL;
 	}
 }
 
@@ -219,7 +229,7 @@ static void kaslr_adjust_got(unsigned long offset)
 	 * reason. Adjust the GOT entries.
 	 */
 	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
-		*entry += offset;
+		*entry += offset - __START_KERNEL;
 }
 #endif
 
@@ -294,6 +304,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
 
 	/* choose kernel address space layout: 4 or 3 levels. */
+	BUILD_BUG_ON(!IS_ALIGNED(__START_KERNEL, THREAD_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
 	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
 	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
@@ -383,9 +394,9 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 /*
  * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
  */
-static void clear_bss_section(unsigned long vmlinux_lma)
+static void clear_bss_section(unsigned long kernel_start)
 {
-	memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
+	memset((void *)kernel_start + vmlinux.image_size, 0, vmlinux.bss_size);
 }
 
 /*
@@ -402,7 +413,7 @@ static void setup_vmalloc_size(void)
 	vmalloc_size = max(size, vmalloc_size);
 }
 
-static void kaslr_adjust_vmlinux_info(unsigned long offset)
+static void kaslr_adjust_vmlinux_info(long offset)
 {
 	vmlinux.bootdata_off += offset;
 	vmlinux.bootdata_preserved_off += offset;
@@ -426,24 +437,30 @@ static void kaslr_adjust_vmlinux_info(unsigned long offset)
 #endif
 }
 
+static void fixup_vmlinux_info(void)
+{
+	vmlinux.entry -= __START_KERNEL;
+	kaslr_adjust_vmlinux_info(-__START_KERNEL);
+}
+
 void startup_kernel(void)
 {
-	unsigned long max_physmem_end;
-	unsigned long vmlinux_lma = 0;
+	unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
+	unsigned long nokaslr_offset_phys = mem_safe_offset();
 	unsigned long amode31_lma = 0;
-	unsigned long kernel_size;
+	unsigned long max_physmem_end;
 	unsigned long asce_limit;
 	unsigned long safe_addr;
-	void *img;
 	psw_t psw;
 
+	fixup_vmlinux_info();
 	setup_lpp();
-	safe_addr = mem_safe_offset();
+	safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
 
 	/*
-	 * Reserve decompressor memory together with decompression heap, buffer and
-	 * memory which might be occupied by uncompressed kernel at default 1Mb
-	 * position (if KASLR is off or failed).
+	 * Reserve decompressor memory together with decompression heap,
+	 * buffer and memory which might be occupied by uncompressed kernel
+	 * (if KASLR is off or failed).
 	 */
 	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
 	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
@@ -463,7 +480,6 @@ void startup_kernel(void)
 	max_physmem_end = detect_max_physmem_end();
 	setup_ident_map_size(max_physmem_end);
 	setup_vmalloc_size();
-	kernel_size = vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
 	asce_limit = setup_kernel_memory_layout(kernel_size);
 	/* got final ident_map_size, physmem allocations could be performed now */
 	physmem_set_usable_limit(ident_map_size);
@@ -472,32 +488,20 @@ void startup_kernel(void)
 		rescue_initrd(safe_addr, ident_map_size);
 	rescue_relocs();
 
-	if (kaslr_enabled()) {
-		vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
-						     THREAD_SIZE, vmlinux.default_lma,
-						     ident_map_size);
-		if (vmlinux_lma) {
-			__kaslr_offset_phys = vmlinux_lma - vmlinux.default_lma;
-			kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
-		}
-	}
-	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
-	physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);
-
-	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
-		img = decompress_kernel();
-		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
-	} else if (__kaslr_offset_phys) {
-		img = (void *)vmlinux.default_lma;
-		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
-		memset(img, 0, vmlinux.image_size);
-	}
+	if (kaslr_enabled())
+		__kaslr_offset_phys = randomize_within_range(kernel_size, THREAD_SIZE, 0, ident_map_size);
+	if (!__kaslr_offset_phys)
+		__kaslr_offset_phys = nokaslr_offset_phys;
+	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
+	physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
+	deploy_kernel((void *)__kaslr_offset_phys);
 
 	/* vmlinux decompression is done, shrink reserved low memory */
 	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
 	if (kaslr_enabled())
 		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
-	amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
+	if (!amode31_lma)
+		amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size;
 	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
 
 	/*
@@ -513,8 +517,8 @@ void startup_kernel(void)
 	 * - copy_bootdata() must follow setup_vmem() to propagate changes
 	 *   to bootdata made by setup_vmem()
 	 */
-	clear_bss_section(vmlinux_lma);
-	kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size,
+	clear_bss_section(__kaslr_offset_phys);
+	kaslr_adjust_relocs(__kaslr_offset_phys, __kaslr_offset_phys + vmlinux.image_size,
 			    __kaslr_offset, __kaslr_offset_phys);
 	kaslr_adjust_got(__kaslr_offset);
 	free_relocs();
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 86edd432e593..96d48b7112d4 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -453,9 +453,6 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
 	 * To allow prefixing the lowcore must be mapped with 4KB pages.
 	 * To prevent creation of a large page at address 0 first map
 	 * the lowcore and create the identity mapping only afterwards.
-	 *
-	 * Skip 0x100000 bytes for kernel pgtables, as per the linker script:
-	 *	. = 0x100000;
 	 */
 	pgtable_populate(0, sizeof(struct lowcore), POPULATE_DIRECT);
 	for_each_physmem_usable_range(i, &start, &end) {
@@ -463,7 +460,7 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
 				 (unsigned long)__identity_va(end),
 				 POPULATE_IDENTITY);
 	}
-	pgtable_populate(kernel_start + 0x100000, kernel_end, POPULATE_KERNEL);
+	pgtable_populate(kernel_start, kernel_end, POPULATE_KERNEL);
 	pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT);
 	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
 			 POPULATE_ABS_LOWCORE);
@@ -471,7 +468,7 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
 			 POPULATE_NONE);
 	memcpy_real_ptep = __identity_va(__virt_to_kpte(__memcpy_real_area));
 
-	kasan_populate_shadow(kernel_start + 0x100000, kernel_end);
+	kasan_populate_shadow(kernel_start, kernel_end);
 
 	S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
 	S390_lowcore.user_asce = s390_invalid_asce;
diff --git a/arch/s390/boot/vmlinux.lds.S b/arch/s390/boot/vmlinux.lds.S
index 3d7ea585ab99..9d6d26908355 100644
--- a/arch/s390/boot/vmlinux.lds.S
+++ b/arch/s390/boot/vmlinux.lds.S
@@ -100,7 +100,8 @@ SECTIONS
 	_decompressor_end = .;
 
 #ifdef CONFIG_KERNEL_UNCOMPRESSED
-	. = 0x100000;
+	. = ALIGN(PAGE_SIZE);
+	. += AMODE31_SIZE;	/* .amode31 section */
 #else
 	. = ALIGN(8);
 #endif