author      Linus Torvalds <torvalds@linux-foundation.org>  2022-12-14 01:31:47 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>  2022-12-14 01:31:47 +0300
commit      fc4c9f450493daef1c996c9d4b3c647ec3121509 (patch)
tree        99078a5d34ba783b9b43092fe2c275784c7cab98 /arch
parent      717e6eb49bdd98357d14c90d60a3409196b33cfc (diff)
parent      e8dfdf3162eb549d064b8c10b1564f7e8ee82591 (diff)
download    linux-fc4c9f450493daef1c996c9d4b3c647ec3121509.tar.xz
Merge tag 'efi-next-for-v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi
Pull EFI updates from Ard Biesheuvel:
"Another fairly sizable pull request, by EFI subsystem standards.
Most of the work was done by me, some of it in collaboration with the
distro and bootloader folks (GRUB, systemd-boot), where the main focus
has been on removing pointless per-arch differences in the way EFI
boots a Linux kernel.
- Refactor the zboot code so that it incorporates all the EFI stub
logic, rather than calling the decompressed kernel as an EFI app.
- Add support for initrd= command line option to x86 mixed mode.
- Allow initrd= to be used with arbitrary EFI accessible file systems
instead of just the one the kernel itself was loaded from.
- Move some x86-only handling and manipulation of the EFI memory map
into arch/x86, as it is not used anywhere else.
- More flexible handling of any random seed provided by the boot
environment (i.e., systemd-boot) so that it becomes available much
earlier during boot.
- Allow improved arch-agnostic EFI support in loaders, by setting a
uniform baseline of supported features, and adding a generic magic
number to the DOS/PE header. This should allow loaders such as GRUB
or systemd-boot to reduce the amount of arch-specific handling
substantially.
- (arm64) Run EFI runtime services from a dedicated stack, and use it
to recover from synchronous exceptions that might occur in the
firmware code.
- (arm64) Ensure that we don't allocate memory outside of the 48-bit
addressable physical range.
- Make EFI pstore record size configurable.
- Add support for decoding CXL-specific CPER records."
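
The "generic magic number" item above is what the header.S and head.S hunks in the
diff below implement: a 32-bit value is placed at offset 0x38 of the image,
immediately before the PE header offset at 0x3c. A minimal loader-side sketch of
checking for it follows; it is illustrative only, and the LINUX_PE_MAGIC value is
assumed to match the constant this series adds to include/linux/pe.h (0x818223cd),
so verify it against the headers you actually build with.

/*
 * Sketch: detect the Linux-specific magic number in the DOS header of a
 * kernel image that has already been read into memory.  Naive read: the
 * value is stored little-endian in the image, so a portable loader would
 * use an explicit le32 load instead of memcpy().
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define LINUX_PE_MAGIC	0x818223cd	/* assumed; see include/linux/pe.h */

static bool image_has_linux_pe_magic(const uint8_t *image, size_t len)
{
	uint32_t magic;

	if (len < 0x40)			/* need the full DOS header */
		return false;

	memcpy(&magic, image + 0x38, sizeof(magic));
	return magic == LINUX_PE_MAGIC;
}

A loader that finds this magic can rely on the uniform baseline of EFI stub
features described in the message above (for example, the initrd= command line
loader) instead of keying its behaviour off the architecture of the image.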
* tag 'efi-next-for-v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi: (43 commits)
arm64: efi: Recover from synchronous exceptions occurring in firmware
arm64: efi: Execute runtime services from a dedicated stack
arm64: efi: Limit allocations to 48-bit addressable physical region
efi: Put Linux specific magic number in the DOS header
efi: libstub: Always enable initrd command line loader and bump version
efi: stub: use random seed from EFI variable
efi: vars: prohibit reading random seed variables
efi: random: combine bootloader provided RNG seed with RNG protocol output
efi/cper, cxl: Decode CXL Error Log
efi/cper, cxl: Decode CXL Protocol Error Section
efi: libstub: fix efi_load_initrd_dev_path() kernel-doc comment
efi: x86: Move EFI runtime map sysfs code to arch/x86
efi: runtime-maps: Clarify purpose and enable by default for kexec
efi: pstore: Add module parameter for setting the record size
efi: xen: Set EFI_PARAVIRT for Xen dom0 boot on all architectures
efi: memmap: Move manipulation routines into x86 arch tree
efi: memmap: Move EFI fake memmap support into x86 arch tree
efi: libstub: Undeprecate the command line initrd loader
efi: libstub: Add mixed mode support to command line initrd loader
efi: libstub: Permit mixed mode return types other than efi_status_t
...
Diffstat (limited to 'arch')
25 files changed, 910 insertions, 190 deletions
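
Two of the pieces moved into arch/x86 below (the EFI_FAKE_MEMMAP Kconfig option and
fake_mem.c) carry the existing efi_fake_mem= boot option rather than anything new.
For reference while reading those hunks, the syntax documented in
admin-guide/kernel-parameters.txt is efi_fake_mem=nn[KMG]@ss[KMG]:aa[,...], where
aa is an EFI memory attribute value, e.g.:

    efi_fake_mem=2G@4G:0x10000,2G@0x10a0000000:0x10000

which tags the two 2 GiB ranges as EFI_MEMORY_MORE_RELIABLE (attribute bit 0x10000)
by rewriting the firmware-provided memory map during early boot.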
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h index 4bdd930167c0..b95241b1ca65 100644 --- a/arch/arm/include/asm/efi.h +++ b/arch/arm/include/asm/efi.h @@ -43,9 +43,6 @@ void efi_virtmap_unload(void); /* arch specific definitions used by the stub code */ -struct screen_info *alloc_screen_info(void); -void free_screen_info(struct screen_info *si); - /* * A reasonable upper bound for the uncompressed kernel size is 32 MBytes, * so we will reserve that amount of memory. We have no easy way to tell what diff --git a/arch/arm/kernel/efi.c b/arch/arm/kernel/efi.c index e50ad7eefc02..882104f43b3b 100644 --- a/arch/arm/kernel/efi.c +++ b/arch/arm/kernel/efi.c @@ -75,38 +75,13 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) return 0; } -static unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata cpu_state_table = EFI_INVALID_TABLE_ADDR; const efi_config_table_type_t efi_arch_tables[] __initconst = { - {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table}, {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table}, {} }; -static void __init load_screen_info_table(void) -{ - struct screen_info *si; - - if (screen_info_table != EFI_INVALID_TABLE_ADDR) { - si = early_memremap_ro(screen_info_table, sizeof(*si)); - if (!si) { - pr_err("Could not map screen_info config table\n"); - return; - } - screen_info = *si; - early_memunmap(si, sizeof(*si)); - - /* dummycon on ARM needs non-zero values for columns/lines */ - screen_info.orig_video_cols = 80; - screen_info.orig_video_lines = 25; - - if (memblock_is_map_memory(screen_info.lfb_base)) - memblock_mark_nomap(screen_info.lfb_base, - screen_info.lfb_size); - } -} - static void __init load_cpu_state_table(void) { if (cpu_state_table != EFI_INVALID_TABLE_ADDR) { @@ -145,7 +120,11 @@ void __init arm_efi_init(void) { efi_init(); - load_screen_info_table(); + if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) { + /* dummycon on ARM needs non-zero values for columns/lines */ + screen_info.orig_video_cols = 80; + screen_info.orig_video_lines = 25; + } /* ARM does not permit early mappings to persist across paging_init() */ efi_memmap_unmap(); diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 439e2bc5d5d8..31d13a6001df 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -14,8 +14,16 @@ #ifdef CONFIG_EFI extern void efi_init(void); + +bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg); #else #define efi_init() + +static inline +bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg) +{ + return false; +} #endif int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); @@ -25,6 +33,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); ({ \ efi_virtmap_load(); \ __efi_fpsimd_begin(); \ + spin_lock(&efi_rt_lock); \ }) #undef arch_efi_call_virt @@ -33,10 +42,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); #define arch_efi_call_virt_teardown() \ ({ \ + spin_unlock(&efi_rt_lock); \ __efi_fpsimd_end(); \ efi_virtmap_unload(); \ }) +extern spinlock_t efi_rt_lock; efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) @@ -76,13 +87,23 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1)); } -#define alloc_screen_info(x...) 
&screen_info - -static inline void free_screen_info(struct screen_info *si) +static inline unsigned long efi_get_kimg_min_align(void) { + extern bool efi_nokaslr; + + /* + * Although relocatable kernels can fix up the misalignment with + * respect to MIN_KIMG_ALIGN, the resulting virtual text addresses are + * subtly out of sync with those recorded in the vmlinux when kaslr is + * disabled but the image required relocation anyway. Therefore retain + * 2M alignment if KASLR was explicitly disabled, even if it was not + * going to be activated to begin with. + */ + return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN; } #define EFI_ALLOC_ALIGN SZ_64K +#define EFI_ALLOC_LIMIT ((1UL << 48) - 1) /* * On ARM systems, virtually remapped UEFI runtime services are set up in two diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 8dd925f4a4c6..ceba6792f5b3 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -36,12 +36,6 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ syscall.o proton-pack.o idreg-override.o idle.o \ patching.o -targets += efi-entry.o - -OBJCOPYFLAGS := --prefix-symbols=__efistub_ -$(obj)/%.stub.o: $(obj)/%.o FORCE - $(call if_changed,objcopy) - obj-$(CONFIG_COMPAT) += sys32.o signal32.o \ sys_compat.o obj-$(CONFIG_COMPAT) += sigreturn32.o @@ -57,8 +51,7 @@ obj-$(CONFIG_CPU_PM) += sleep.o suspend.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o -obj-$(CONFIG_EFI) += efi.o efi-entry.stub.o \ - efi-rt-wrapper.o +obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o obj-$(CONFIG_ACPI) += acpi.o diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S deleted file mode 100644 index 61a87fa1c305..000000000000 --- a/arch/arm64/kernel/efi-entry.S +++ /dev/null @@ -1,69 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * EFI entry point. - * - * Copyright (C) 2013, 2014 Red Hat, Inc. - * Author: Mark Salter <msalter@redhat.com> - */ -#include <linux/linkage.h> -#include <linux/init.h> - -#include <asm/assembler.h> - - __INIT - -SYM_CODE_START(efi_enter_kernel) - /* - * efi_pe_entry() will have copied the kernel image if necessary and we - * end up here with device tree address in x1 and the kernel entry - * point stored in x0. Save those values in registers which are - * callee preserved. - */ - ldr w2, =primary_entry_offset - add x19, x0, x2 // relocated Image entrypoint - mov x20, x1 // DTB address - - /* - * Clean the copied Image to the PoC, and ensure it is not shadowed by - * stale icache entries from before relocation. - */ - ldr w1, =kernel_size - add x1, x0, x1 - bl dcache_clean_poc - ic ialluis - - /* - * Clean the remainder of this routine to the PoC - * so that we can safely disable the MMU and caches. 
- */ - adr x0, 0f - adr x1, 3f - bl dcache_clean_poc -0: - /* Turn off Dcache and MMU */ - mrs x0, CurrentEL - cmp x0, #CurrentEL_EL2 - b.ne 1f - mrs x0, sctlr_el2 - bic x0, x0, #1 << 0 // clear SCTLR.M - bic x0, x0, #1 << 2 // clear SCTLR.C - pre_disable_mmu_workaround - msr sctlr_el2, x0 - isb - b 2f -1: - mrs x0, sctlr_el1 - bic x0, x0, #1 << 0 // clear SCTLR.M - bic x0, x0, #1 << 2 // clear SCTLR.C - pre_disable_mmu_workaround - msr sctlr_el1, x0 - isb -2: - /* Jump to kernel entry point */ - mov x0, x20 - mov x1, xzr - mov x2, xzr - mov x3, xzr - br x19 -3: -SYM_CODE_END(efi_enter_kernel) diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S index 75691a2641c1..a00886410537 100644 --- a/arch/arm64/kernel/efi-rt-wrapper.S +++ b/arch/arm64/kernel/efi-rt-wrapper.S @@ -6,7 +6,7 @@ #include <linux/linkage.h> SYM_FUNC_START(__efi_rt_asm_wrapper) - stp x29, x30, [sp, #-32]! + stp x29, x30, [sp, #-112]! mov x29, sp /* @@ -17,6 +17,22 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) stp x1, x18, [sp, #16] /* + * Preserve all callee saved registers and preserve the stack pointer + * value at the base of the EFI runtime stack so we can recover from + * synchronous exceptions occurring while executing the firmware + * routines. + */ + stp x19, x20, [sp, #32] + stp x21, x22, [sp, #48] + stp x23, x24, [sp, #64] + stp x25, x26, [sp, #80] + stp x27, x28, [sp, #96] + + ldr_l x16, efi_rt_stack_top + mov sp, x16 + stp x18, x29, [sp, #-16]! + + /* * We are lucky enough that no EFI runtime services take more than * 5 arguments, so all are passed in registers rather than via the * stack. @@ -29,9 +45,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) mov x4, x6 blr x8 + mov sp, x29 ldp x1, x2, [sp, #16] cmp x2, x18 - ldp x29, x30, [sp], #32 + ldp x29, x30, [sp], #112 b.ne 0f ret 0: @@ -42,6 +59,22 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) * called with preemption disabled and a separate shadow stack is used * for interrupts. 
*/ - mov x18, x2 +#ifdef CONFIG_SHADOW_CALL_STACK + ldr_l x18, efi_rt_stack_top + ldr x18, [x18, #-16] +#endif + b efi_handle_corrupted_x18 // tail call SYM_FUNC_END(__efi_rt_asm_wrapper) + +SYM_CODE_START(__efi_rt_asm_recover) + mov sp, x30 + + ldp x19, x20, [sp, #32] + ldp x21, x22, [sp, #48] + ldp x23, x24, [sp, #64] + ldp x25, x26, [sp, #80] + ldp x27, x28, [sp, #96] + ldp x29, x30, [sp], #112 + ret +SYM_CODE_END(__efi_rt_asm_recover) diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index a908a37f0367..fab05de2e12d 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -144,3 +144,52 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f) pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f); return s; } + +DEFINE_SPINLOCK(efi_rt_lock); + +asmlinkage u64 *efi_rt_stack_top __ro_after_init; + +asmlinkage efi_status_t __efi_rt_asm_recover(void); + +bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg) +{ + /* Check whether the exception occurred while running the firmware */ + if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64) + return false; + + pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg); + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + regs->regs[0] = EFI_ABORTED; + regs->regs[30] = efi_rt_stack_top[-1]; + regs->pc = (u64)__efi_rt_asm_recover; + + if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) + regs->regs[18] = efi_rt_stack_top[-2]; + + return true; +} + +/* EFI requires 8 KiB of stack space for runtime services */ +static_assert(THREAD_SIZE >= SZ_8K); + +static int __init arm64_efi_rt_init(void) +{ + void *p; + + if (!efi_enabled(EFI_RUNTIME_SERVICES)) + return 0; + + p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL, + NUMA_NO_NODE, &&l); +l: if (!p) { + pr_warn("Failed to allocate EFI runtime stack\n"); + clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); + return -ENOMEM; + } + + efi_rt_stack_top = p + THREAD_SIZE; + return 0; +} +core_initcall(arm64_efi_rt_init); diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 8151412653de..f31130ba0233 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -10,7 +10,6 @@ #error This file should only be included in vmlinux.lds.S #endif -PROVIDE(__efistub_kernel_size = _edata - _text); PROVIDE(__efistub_primary_entry_offset = primary_entry - _text); /* @@ -22,13 +21,6 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text); * linked at. 
The routines below are all implemented in assembler in a * position independent manner */ -PROVIDE(__efistub_memcmp = __pi_memcmp); -PROVIDE(__efistub_memchr = __pi_memchr); -PROVIDE(__efistub_strlen = __pi_strlen); -PROVIDE(__efistub_strnlen = __pi_strnlen); -PROVIDE(__efistub_strcmp = __pi_strcmp); -PROVIDE(__efistub_strncmp = __pi_strncmp); -PROVIDE(__efistub_strrchr = __pi_strrchr); PROVIDE(__efistub_dcache_clean_poc = __pi_dcache_clean_poc); PROVIDE(__efistub__text = _text); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 74f76514a48d..3eb2825d08cf 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -30,6 +30,7 @@ #include <asm/bug.h> #include <asm/cmpxchg.h> #include <asm/cpufeature.h> +#include <asm/efi.h> #include <asm/exception.h> #include <asm/daifflags.h> #include <asm/debug-monitors.h> @@ -397,6 +398,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned long esr, msg = "paging request"; } + if (efi_runtime_fixup_exception(regs, msg)) + return; + die_kernel_fault(msg, addr, esr, regs); } diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h index 174567b00ddb..97f16e60c6ff 100644 --- a/arch/loongarch/include/asm/efi.h +++ b/arch/loongarch/include/asm/efi.h @@ -19,18 +19,18 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt); #define EFI_ALLOC_ALIGN SZ_64K #define EFI_RT_VIRTUAL_OFFSET CSR_DMW0_BASE -static inline struct screen_info *alloc_screen_info(void) +static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) { - return &screen_info; + return ULONG_MAX; } -static inline void free_screen_info(struct screen_info *si) +static inline unsigned long efi_get_kimg_min_align(void) { + return SZ_2M; } -static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) -{ - return ULONG_MAX; -} +#define EFI_KIMG_PREFERRED_ADDRESS PHYSADDR(VMLINUX_LOAD_ADDRESS) + +unsigned long kernel_entry_address(void); #endif /* _ASM_LOONGARCH_EFI_H */ diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index a31329971133..d75ce73e8ff8 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -52,6 +52,27 @@ void __init efi_runtime_init(void) set_bit(EFI_RUNTIME_SERVICES, &efi.flags); } +unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR; + +static void __init init_screen_info(void) +{ + struct screen_info *si; + + if (screen_info_table == EFI_INVALID_TABLE_ADDR) + return; + + si = early_memremap(screen_info_table, sizeof(*si)); + if (!si) { + pr_err("Could not map screen_info config table\n"); + return; + } + screen_info = *si; + memset(si, 0, sizeof(*si)); + early_memunmap(si, sizeof(*si)); + + memblock_reserve(screen_info.lfb_base, screen_info.lfb_size); +} + void __init efi_init(void) { int size; @@ -80,8 +101,7 @@ void __init efi_init(void) set_bit(EFI_CONFIG_TABLES, &efi.flags); - if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) - memblock_reserve(screen_info.lfb_base, screen_info.lfb_size); + init_screen_info(); if (boot_memmap == EFI_INVALID_TABLE_ADDR) return; diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S index 84970e266658..57bada6b4e93 100644 --- a/arch/loongarch/kernel/head.S +++ b/arch/loongarch/kernel/head.S @@ -25,7 +25,8 @@ _head: .dword kernel_entry /* Kernel entry point */ .dword _end - _text /* Kernel image effective size */ .quad 0 /* Kernel image load offset from start of RAM */ - .org 0x3c /* 0x20 ~ 0x3b reserved */ + .org 0x38 /* 0x20 ~ 0x37 reserved */ + .long LINUX_PE_MAGIC 
.long pe_header - _head /* Offset to the PE header */ pe_header: diff --git a/arch/loongarch/kernel/image-vars.h b/arch/loongarch/kernel/image-vars.h index 88f5d81702df..e561989d02de 100644 --- a/arch/loongarch/kernel/image-vars.h +++ b/arch/loongarch/kernel/image-vars.h @@ -7,15 +7,7 @@ #ifdef CONFIG_EFI_STUB -__efistub_memcmp = memcmp; -__efistub_memchr = memchr; -__efistub_strcat = strcat; __efistub_strcmp = strcmp; -__efistub_strlen = strlen; -__efistub_strncat = strncat; -__efistub_strnstr = strnstr; -__efistub_strnlen = strnlen; -__efistub_strrchr = strrchr; __efistub_kernel_entry = kernel_entry; __efistub_kernel_asize = kernel_asize; __efistub_kernel_fsize = kernel_fsize; diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h index e229d7be4b66..47d3ab0fcc36 100644 --- a/arch/riscv/include/asm/efi.h +++ b/arch/riscv/include/asm/efi.h @@ -35,13 +35,20 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) return ULONG_MAX; } -#define alloc_screen_info(x...) (&screen_info) - -static inline void free_screen_info(struct screen_info *si) +static inline unsigned long efi_get_kimg_min_align(void) { + /* + * RISC-V requires the kernel image to placed 2 MB aligned base for 64 + * bit and 4MB for 32 bit. + */ + return IS_ENABLED(CONFIG_64BIT) ? SZ_2M : SZ_4M; } +#define EFI_KIMG_PREFERRED_ADDRESS efi_get_kimg_min_align() + void efi_virtmap_load(void); void efi_virtmap_unload(void); +unsigned long stext_offset(void); + #endif /* _ASM_EFI_H */ diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h index d6e5f739905e..7e2962ef73f9 100644 --- a/arch/riscv/kernel/image-vars.h +++ b/arch/riscv/kernel/image-vars.h @@ -23,13 +23,7 @@ * linked at. The routines below are all implemented in assembler in a * position independent manner */ -__efistub_memcmp = memcmp; -__efistub_memchr = memchr; -__efistub_strlen = strlen; -__efistub_strnlen = strnlen; __efistub_strcmp = strcmp; -__efistub_strncmp = strncmp; -__efistub_strrchr = strrchr; __efistub__start = _start; __efistub__start_kernel = _start_kernel; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 276b3baf1c50..e9ede4c4c27f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1995,6 +1995,37 @@ config EFI_MIXED If unsure, say N. +config EFI_FAKE_MEMMAP + bool "Enable EFI fake memory map" + depends on EFI + help + Saying Y here will enable "efi_fake_mem" boot option. By specifying + this parameter, you can add arbitrary attribute to specific memory + range by updating original (firmware provided) EFI memmap. This is + useful for debugging of EFI memmap related feature, e.g., Address + Range Mirroring feature. + +config EFI_MAX_FAKE_MEM + int "maximum allowable number of ranges in efi_fake_mem boot option" + depends on EFI_FAKE_MEMMAP + range 1 128 + default 8 + help + Maximum allowable number of ranges in efi_fake_mem boot option. + Ranges can be set up to this value using comma-separated list. + The default value is 8. + +config EFI_RUNTIME_MAP + bool "Export EFI runtime maps to sysfs" if EXPERT + depends on EFI + default KEXEC_CORE + help + Export EFI runtime memory regions to /sys/firmware/efi/runtime-map. + That memory map is required by the 2nd kernel to set up EFI virtual + mappings after kexec, but can also be used for debugging purposes. + + See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map. 
+ source "kernel/Kconfig.hz" config KEXEC diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S index 67e7edcdfea8..0c988f2a1243 100644 --- a/arch/x86/boot/compressed/efi_thunk_64.S +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -93,12 +93,6 @@ SYM_FUNC_START(__efi64_thunk) movl %ebx, %fs movl %ebx, %gs - /* - * Convert 32-bit status code into 64-bit. - */ - roll $1, %eax - rorq $1, %rax - pop %rbx pop %rbp RET diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index f912d7770130..be8f78a7ee32 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -80,10 +80,11 @@ bs_die: ljmp $0xf000,$0xfff0 #ifdef CONFIG_EFI_STUB - .org 0x3c + .org 0x38 # # Offset to the PE header. # + .long LINUX_PE_MAGIC .long pe_header #endif /* CONFIG_EFI_STUB */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 233ae6986d6f..a63154e049d7 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -178,7 +178,7 @@ struct efi_setup_data { extern u64 efi_setup; #ifdef CONFIG_EFI -extern efi_status_t __efi64_thunk(u32, ...); +extern u64 __efi64_thunk(u32, ...); #define efi64_thunk(...) ({ \ u64 __pad[3]; /* must have space for 3 args on the stack */ \ @@ -228,16 +228,15 @@ static inline bool efi_is_native(void) return efi_is_64bit(); } -#define efi_mixed_mode_cast(attr) \ - __builtin_choose_expr( \ - __builtin_types_compatible_p(u32, __typeof__(attr)), \ - (unsigned long)(attr), (attr)) - #define efi_table_attr(inst, attr) \ - (efi_is_native() \ - ? inst->attr \ - : (__typeof__(inst->attr)) \ - efi_mixed_mode_cast(inst->mixed_mode.attr)) + (efi_is_native() ? (inst)->attr \ + : efi_mixed_table_attr((inst), attr)) + +#define efi_mixed_table_attr(inst, attr) \ + (__typeof__(inst->attr)) \ + _Generic(inst->mixed_mode.attr, \ + u32: (unsigned long)(inst->mixed_mode.attr), \ + default: (inst->mixed_mode.attr)) /* * The following macros allow translating arguments if necessary from native to @@ -325,6 +324,17 @@ static inline u32 efi64_convert_status(efi_status_t status) #define __efi64_argmap_set_memory_space_attributes(phys, size, flags) \ (__efi64_split(phys), __efi64_split(size), __efi64_split(flags)) +/* file protocol */ +#define __efi64_argmap_open(prot, newh, fname, mode, attr) \ + ((prot), efi64_zero_upper(newh), (fname), __efi64_split(mode), \ + __efi64_split(attr)) + +#define __efi64_argmap_set_position(pos) (__efi64_split(pos)) + +/* file system protocol */ +#define __efi64_argmap_open_volume(prot, file) \ + ((prot), efi64_zero_upper(file)) + /* * The macros below handle the plumbing for the argument mapping. To add a * mapping for a specific EFI method, simply define a macro @@ -344,31 +354,27 @@ static inline u32 efi64_convert_status(efi_status_t status) #define __efi_eat(...) #define __efi_eval(...) __VA_ARGS__ -/* The three macros below handle dispatching via the thunk if needed */ - -#define efi_call_proto(inst, func, ...) \ - (efi_is_native() \ - ? inst->func(inst, ##__VA_ARGS__) \ - : __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__)) +static inline efi_status_t __efi64_widen_efi_status(u64 status) +{ + /* use rotate to move the value of bit #31 into position #63 */ + return ror64(rol32(status, 1), 1); +} -#define efi_bs_call(func, ...) \ - (efi_is_native() \ - ? efi_system_table->boottime->func(__VA_ARGS__) \ - : __efi64_thunk_map(efi_table_attr(efi_system_table, \ - boottime), \ - func, __VA_ARGS__)) +/* The macro below handles dispatching via the thunk if needed */ -#define efi_rt_call(func, ...) 
\ - (efi_is_native() \ - ? efi_system_table->runtime->func(__VA_ARGS__) \ - : __efi64_thunk_map(efi_table_attr(efi_system_table, \ - runtime), \ - func, __VA_ARGS__)) +#define efi_fn_call(inst, func, ...) \ + (efi_is_native() ? (inst)->func(__VA_ARGS__) \ + : efi_mixed_call((inst), func, ##__VA_ARGS__)) -#define efi_dxe_call(func, ...) \ - (efi_is_native() \ - ? efi_dxe_table->func(__VA_ARGS__) \ - : __efi64_thunk_map(efi_dxe_table, func, __VA_ARGS__)) +#define efi_mixed_call(inst, func, ...) \ + _Generic(inst->func(__VA_ARGS__), \ + efi_status_t: \ + __efi64_widen_efi_status( \ + __efi64_thunk_map(inst, func, ##__VA_ARGS__)), \ + u64: ({ BUILD_BUG(); ULONG_MAX; }), \ + default: \ + (__typeof__(inst->func(__VA_ARGS__))) \ + __efi64_thunk_map(inst, func, ##__VA_ARGS__)) #else /* CONFIG_EFI_MIXED */ @@ -400,13 +406,52 @@ static inline void efi_reserve_boot_services(void) #ifdef CONFIG_EFI_FAKE_MEMMAP extern void __init efi_fake_memmap_early(void); +extern void __init efi_fake_memmap(void); #else static inline void efi_fake_memmap_early(void) { } + +static inline void efi_fake_memmap(void) +{ +} #endif +extern int __init efi_memmap_alloc(unsigned int num_entries, + struct efi_memory_map_data *data); +extern void __efi_memmap_free(u64 phys, unsigned long size, + unsigned long flags); +#define __efi_memmap_free __efi_memmap_free + +extern int __init efi_memmap_install(struct efi_memory_map_data *data); +extern int __init efi_memmap_split_count(efi_memory_desc_t *md, + struct range *range); +extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap, + void *buf, struct efi_mem_range *mem); + #define arch_ima_efi_boot_mode \ ({ extern struct boot_params boot_params; boot_params.secure_boot; }) +#ifdef CONFIG_EFI_RUNTIME_MAP +int efi_get_runtime_map_size(void); +int efi_get_runtime_map_desc_size(void); +int efi_runtime_map_copy(void *buf, size_t bufsz); +#else +static inline int efi_get_runtime_map_size(void) +{ + return 0; +} + +static inline int efi_get_runtime_map_desc_size(void) +{ + return 0; +} + +static inline int efi_runtime_map_copy(void *buf, size_t bufsz) +{ + return 0; +} + +#endif + #endif /* _ASM_X86_EFI_H */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 216fee7144ee..41ec3a69f3c7 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -31,6 +31,7 @@ #include <xen/xen.h> #include <asm/apic.h> +#include <asm/efi.h> #include <asm/numa.h> #include <asm/bios_ebda.h> #include <asm/bugs.h> diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile index a50245157685..543df9a1379d 100644 --- a/arch/x86/platform/efi/Makefile +++ b/arch/x86/platform/efi/Makefile @@ -2,5 +2,8 @@ KASAN_SANITIZE := n GCOV_PROFILE := n -obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o +obj-$(CONFIG_EFI) += memmap.o quirks.o efi.o efi_$(BITS).o \ + efi_stub_$(BITS).o obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o +obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o +obj-$(CONFIG_EFI_RUNTIME_MAP) += runtime-map.o diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index ebc98a68c400..7e51c14a1ef0 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -214,9 +214,11 @@ int __init efi_memblock_x86_reserve_range(void) data.desc_size = e->efi_memdesc_size; data.desc_version = e->efi_memdesc_version; - rv = efi_memmap_init_early(&data); - if (rv) - return rv; + if (!efi_enabled(EFI_PARAVIRT)) { + rv = efi_memmap_init_early(&data); + if (rv) + return rv; + } if (add_efi_memmap || 
do_efi_soft_reserve()) do_add_efi_memmap(); diff --git a/arch/x86/platform/efi/fake_mem.c b/arch/x86/platform/efi/fake_mem.c new file mode 100644 index 000000000000..41d57cad3d84 --- /dev/null +++ b/arch/x86/platform/efi/fake_mem.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * fake_mem.c + * + * Copyright (C) 2015 FUJITSU LIMITED + * Author: Taku Izumi <izumi.taku@jp.fujitsu.com> + * + * This code introduces new boot option named "efi_fake_mem" + * By specifying this parameter, you can add arbitrary attribute to + * specific memory range by updating original (firmware provided) EFI + * memmap. + */ + +#include <linux/kernel.h> +#include <linux/efi.h> +#include <linux/init.h> +#include <linux/memblock.h> +#include <linux/types.h> +#include <linux/sort.h> +#include <asm/e820/api.h> +#include <asm/efi.h> + +#define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM + +static struct efi_mem_range efi_fake_mems[EFI_MAX_FAKEMEM]; +static int nr_fake_mem; + +static int __init cmp_fake_mem(const void *x1, const void *x2) +{ + const struct efi_mem_range *m1 = x1; + const struct efi_mem_range *m2 = x2; + + if (m1->range.start < m2->range.start) + return -1; + if (m1->range.start > m2->range.start) + return 1; + return 0; +} + +static void __init efi_fake_range(struct efi_mem_range *efi_range) +{ + struct efi_memory_map_data data = { 0 }; + int new_nr_map = efi.memmap.nr_map; + efi_memory_desc_t *md; + void *new_memmap; + + /* count up the number of EFI memory descriptor */ + for_each_efi_memory_desc(md) + new_nr_map += efi_memmap_split_count(md, &efi_range->range); + + /* allocate memory for new EFI memmap */ + if (efi_memmap_alloc(new_nr_map, &data) != 0) + return; + + /* create new EFI memmap */ + new_memmap = early_memremap(data.phys_map, data.size); + if (!new_memmap) { + __efi_memmap_free(data.phys_map, data.size, data.flags); + return; + } + + efi_memmap_insert(&efi.memmap, new_memmap, efi_range); + + /* swap into new EFI memmap */ + early_memunmap(new_memmap, data.size); + + efi_memmap_install(&data); +} + +void __init efi_fake_memmap(void) +{ + int i; + + if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) + return; + + for (i = 0; i < nr_fake_mem; i++) + efi_fake_range(&efi_fake_mems[i]); + + /* print new EFI memmap */ + efi_print_memmap(); +} + +static int __init setup_fake_mem(char *p) +{ + u64 start = 0, mem_size = 0, attribute = 0; + int i; + + if (!p) + return -EINVAL; + + while (*p != '\0') { + mem_size = memparse(p, &p); + if (*p == '@') + start = memparse(p+1, &p); + else + break; + + if (*p == ':') + attribute = simple_strtoull(p+1, &p, 0); + else + break; + + if (nr_fake_mem >= EFI_MAX_FAKEMEM) + break; + + efi_fake_mems[nr_fake_mem].range.start = start; + efi_fake_mems[nr_fake_mem].range.end = start + mem_size - 1; + efi_fake_mems[nr_fake_mem].attribute = attribute; + nr_fake_mem++; + + if (*p == ',') + p++; + } + + sort(efi_fake_mems, nr_fake_mem, sizeof(struct efi_mem_range), + cmp_fake_mem, NULL); + + for (i = 0; i < nr_fake_mem; i++) + pr_info("efi_fake_mem: add attr=0x%016llx to [mem 0x%016llx-0x%016llx]", + efi_fake_mems[i].attribute, efi_fake_mems[i].range.start, + efi_fake_mems[i].range.end); + + return *p == '\0' ? 0 : -EINVAL; +} + +early_param("efi_fake_mem", setup_fake_mem); + +void __init efi_fake_memmap_early(void) +{ + int i; + + /* + * The late efi_fake_mem() call can handle all requests if + * EFI_MEMORY_SP support is disabled. 
+ */ + if (!efi_soft_reserve_enabled()) + return; + + if (!efi_enabled(EFI_MEMMAP) || !nr_fake_mem) + return; + + /* + * Given that efi_fake_memmap() needs to perform memblock + * allocations it needs to run after e820__memblock_setup(). + * However, if efi_fake_mem specifies EFI_MEMORY_SP for a given + * address range that potentially needs to mark the memory as + * reserved prior to e820__memblock_setup(). Update e820 + * directly if EFI_MEMORY_SP is specified for an + * EFI_CONVENTIONAL_MEMORY descriptor. + */ + for (i = 0; i < nr_fake_mem; i++) { + struct efi_mem_range *mem = &efi_fake_mems[i]; + efi_memory_desc_t *md; + u64 m_start, m_end; + + if ((mem->attribute & EFI_MEMORY_SP) == 0) + continue; + + m_start = mem->range.start; + m_end = mem->range.end; + for_each_efi_memory_desc(md) { + u64 start, end, size; + + if (md->type != EFI_CONVENTIONAL_MEMORY) + continue; + + start = md->phys_addr; + end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1; + + if (m_start <= end && m_end >= start) + /* fake range overlaps descriptor */; + else + continue; + + /* + * Trim the boundary of the e820 update to the + * descriptor in case the fake range overlaps + * !EFI_CONVENTIONAL_MEMORY + */ + start = max(start, m_start); + end = min(end, m_end); + size = end - start + 1; + + if (end <= start) + continue; + + /* + * Ensure each efi_fake_mem instance results in + * a unique e820 resource + */ + e820__range_remove(start, size, E820_TYPE_RAM, 1); + e820__range_add(start, size, E820_TYPE_SOFT_RESERVED); + e820__update_table(e820_table); + } + } +} diff --git a/arch/x86/platform/efi/memmap.c b/arch/x86/platform/efi/memmap.c new file mode 100644 index 000000000000..c69f8471e6d0 --- /dev/null +++ b/arch/x86/platform/efi/memmap.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Common EFI memory map functions. + */ + +#define pr_fmt(fmt) "efi: " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/efi.h> +#include <linux/io.h> +#include <asm/early_ioremap.h> +#include <asm/efi.h> +#include <linux/memblock.h> +#include <linux/slab.h> + +static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size) +{ + return memblock_phys_alloc(size, SMP_CACHE_BYTES); +} + +static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) +{ + unsigned int order = get_order(size); + struct page *p = alloc_pages(GFP_KERNEL, order); + + if (!p) + return 0; + + return PFN_PHYS(page_to_pfn(p)); +} + +void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags) +{ + if (flags & EFI_MEMMAP_MEMBLOCK) { + if (slab_is_available()) + memblock_free_late(phys, size); + else + memblock_phys_free(phys, size); + } else if (flags & EFI_MEMMAP_SLAB) { + struct page *p = pfn_to_page(PHYS_PFN(phys)); + unsigned int order = get_order(size); + + free_pages((unsigned long) page_address(p), order); + } +} + +/** + * efi_memmap_alloc - Allocate memory for the EFI memory map + * @num_entries: Number of entries in the allocated map. + * @data: efi memmap installation parameters + * + * Depending on whether mm_init() has already been invoked or not, + * either memblock or "normal" page allocation is used. + * + * Returns zero on success, a negative error code on failure. 
+ */ +int __init efi_memmap_alloc(unsigned int num_entries, + struct efi_memory_map_data *data) +{ + /* Expect allocation parameters are zero initialized */ + WARN_ON(data->phys_map || data->size); + + data->size = num_entries * efi.memmap.desc_size; + data->desc_version = efi.memmap.desc_version; + data->desc_size = efi.memmap.desc_size; + data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK); + data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE; + + if (slab_is_available()) { + data->flags |= EFI_MEMMAP_SLAB; + data->phys_map = __efi_memmap_alloc_late(data->size); + } else { + data->flags |= EFI_MEMMAP_MEMBLOCK; + data->phys_map = __efi_memmap_alloc_early(data->size); + } + + if (!data->phys_map) + return -ENOMEM; + return 0; +} + +/** + * efi_memmap_install - Install a new EFI memory map in efi.memmap + * @ctx: map allocation parameters (address, size, flags) + * + * Unlike efi_memmap_init_*(), this function does not allow the caller + * to switch from early to late mappings. It simply uses the existing + * mapping function and installs the new memmap. + * + * Returns zero on success, a negative error code on failure. + */ +int __init efi_memmap_install(struct efi_memory_map_data *data) +{ + efi_memmap_unmap(); + + if (efi_enabled(EFI_PARAVIRT)) + return 0; + + return __efi_memmap_init(data); +} + +/** + * efi_memmap_split_count - Count number of additional EFI memmap entries + * @md: EFI memory descriptor to split + * @range: Address range (start, end) to split around + * + * Returns the number of additional EFI memmap entries required to + * accommodate @range. + */ +int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range) +{ + u64 m_start, m_end; + u64 start, end; + int count = 0; + + start = md->phys_addr; + end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1; + + /* modifying range */ + m_start = range->start; + m_end = range->end; + + if (m_start <= start) { + /* split into 2 parts */ + if (start < m_end && m_end < end) + count++; + } + + if (start < m_start && m_start < end) { + /* split into 3 parts */ + if (m_end < end) + count += 2; + /* split into 2 parts */ + if (end <= m_end) + count++; + } + + return count; +} + +/** + * efi_memmap_insert - Insert a memory region in an EFI memmap + * @old_memmap: The existing EFI memory map structure + * @buf: Address of buffer to store new map + * @mem: Memory map entry to insert + * + * It is suggested that you call efi_memmap_split_count() first + * to see how large @buf needs to be. + */ +void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf, + struct efi_mem_range *mem) +{ + u64 m_start, m_end, m_attr; + efi_memory_desc_t *md; + u64 start, end; + void *old, *new; + + /* modifying range */ + m_start = mem->range.start; + m_end = mem->range.end; + m_attr = mem->attribute; + + /* + * The EFI memory map deals with regions in EFI_PAGE_SIZE + * units. Ensure that the region described by 'mem' is aligned + * correctly. 
+ */ + if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) || + !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) { + WARN_ON(1); + return; + } + + for (old = old_memmap->map, new = buf; + old < old_memmap->map_end; + old += old_memmap->desc_size, new += old_memmap->desc_size) { + + /* copy original EFI memory descriptor */ + memcpy(new, old, old_memmap->desc_size); + md = new; + start = md->phys_addr; + end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1; + + if (m_start <= start && end <= m_end) + md->attribute |= m_attr; + + if (m_start <= start && + (start < m_end && m_end < end)) { + /* first part */ + md->attribute |= m_attr; + md->num_pages = (m_end - md->phys_addr + 1) >> + EFI_PAGE_SHIFT; + /* latter part */ + new += old_memmap->desc_size; + memcpy(new, old, old_memmap->desc_size); + md = new; + md->phys_addr = m_end + 1; + md->num_pages = (end - md->phys_addr + 1) >> + EFI_PAGE_SHIFT; + } + + if ((start < m_start && m_start < end) && m_end < end) { + /* first part */ + md->num_pages = (m_start - md->phys_addr) >> + EFI_PAGE_SHIFT; + /* middle part */ + new += old_memmap->desc_size; + memcpy(new, old, old_memmap->desc_size); + md = new; + md->attribute |= m_attr; + md->phys_addr = m_start; + md->num_pages = (m_end - m_start + 1) >> + EFI_PAGE_SHIFT; + /* last part */ + new += old_memmap->desc_size; + memcpy(new, old, old_memmap->desc_size); + md = new; + md->phys_addr = m_end + 1; + md->num_pages = (end - m_end) >> + EFI_PAGE_SHIFT; + } + + if ((start < m_start && m_start < end) && + (end <= m_end)) { + /* first part */ + md->num_pages = (m_start - md->phys_addr) >> + EFI_PAGE_SHIFT; + /* latter part */ + new += old_memmap->desc_size; + memcpy(new, old, old_memmap->desc_size); + md = new; + md->phys_addr = m_start; + md->num_pages = (end - md->phys_addr + 1) >> + EFI_PAGE_SHIFT; + md->attribute |= m_attr; + } + } +} diff --git a/arch/x86/platform/efi/runtime-map.c b/arch/x86/platform/efi/runtime-map.c new file mode 100644 index 000000000000..bbee682ef8cd --- /dev/null +++ b/arch/x86/platform/efi/runtime-map.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2013 Red Hat, Inc., Dave Young <dyoung@redhat.com> + */ + +#include <linux/string.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/efi.h> +#include <linux/slab.h> + +#include <asm/efi.h> +#include <asm/setup.h> + +struct efi_runtime_map_entry { + efi_memory_desc_t md; + struct kobject kobj; /* kobject for each entry */ +}; + +static struct efi_runtime_map_entry **map_entries; + +struct map_attribute { + struct attribute attr; + ssize_t (*show)(struct efi_runtime_map_entry *entry, char *buf); +}; + +static inline struct map_attribute *to_map_attr(struct attribute *attr) +{ + return container_of(attr, struct map_attribute, attr); +} + +static ssize_t type_show(struct efi_runtime_map_entry *entry, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "0x%x\n", entry->md.type); +} + +#define EFI_RUNTIME_FIELD(var) entry->md.var + +#define EFI_RUNTIME_U64_ATTR_SHOW(name) \ +static ssize_t name##_show(struct efi_runtime_map_entry *entry, char *buf) \ +{ \ + return snprintf(buf, PAGE_SIZE, "0x%llx\n", EFI_RUNTIME_FIELD(name)); \ +} + +EFI_RUNTIME_U64_ATTR_SHOW(phys_addr); +EFI_RUNTIME_U64_ATTR_SHOW(virt_addr); +EFI_RUNTIME_U64_ATTR_SHOW(num_pages); +EFI_RUNTIME_U64_ATTR_SHOW(attribute); + +static inline struct efi_runtime_map_entry *to_map_entry(struct kobject *kobj) +{ + return container_of(kobj, struct efi_runtime_map_entry, kobj); +} + +static ssize_t map_attr_show(struct 
kobject *kobj, struct attribute *attr, + char *buf) +{ + struct efi_runtime_map_entry *entry = to_map_entry(kobj); + struct map_attribute *map_attr = to_map_attr(attr); + + return map_attr->show(entry, buf); +} + +static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400); +static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400); +static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400); +static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400); +static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400); + +/* + * These are default attributes that are added for every memmap entry. + */ +static struct attribute *def_attrs[] = { + &map_type_attr.attr, + &map_phys_addr_attr.attr, + &map_virt_addr_attr.attr, + &map_num_pages_attr.attr, + &map_attribute_attr.attr, + NULL +}; +ATTRIBUTE_GROUPS(def); + +static const struct sysfs_ops map_attr_ops = { + .show = map_attr_show, +}; + +static void map_release(struct kobject *kobj) +{ + struct efi_runtime_map_entry *entry; + + entry = to_map_entry(kobj); + kfree(entry); +} + +static struct kobj_type __refdata map_ktype = { + .sysfs_ops = &map_attr_ops, + .default_groups = def_groups, + .release = map_release, +}; + +static struct kset *map_kset; + +static struct efi_runtime_map_entry * +add_sysfs_runtime_map_entry(struct kobject *kobj, int nr, + efi_memory_desc_t *md) +{ + int ret; + struct efi_runtime_map_entry *entry; + + if (!map_kset) { + map_kset = kset_create_and_add("runtime-map", NULL, kobj); + if (!map_kset) + return ERR_PTR(-ENOMEM); + } + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + kset_unregister(map_kset); + map_kset = NULL; + return ERR_PTR(-ENOMEM); + } + + memcpy(&entry->md, md, sizeof(efi_memory_desc_t)); + + kobject_init(&entry->kobj, &map_ktype); + entry->kobj.kset = map_kset; + ret = kobject_add(&entry->kobj, NULL, "%d", nr); + if (ret) { + kobject_put(&entry->kobj); + kset_unregister(map_kset); + map_kset = NULL; + return ERR_PTR(ret); + } + + return entry; +} + +int efi_get_runtime_map_size(void) +{ + return efi.memmap.nr_map * efi.memmap.desc_size; +} + +int efi_get_runtime_map_desc_size(void) +{ + return efi.memmap.desc_size; +} + +int efi_runtime_map_copy(void *buf, size_t bufsz) +{ + size_t sz = efi_get_runtime_map_size(); + + if (sz > bufsz) + sz = bufsz; + + memcpy(buf, efi.memmap.map, sz); + return 0; +} + +static int __init efi_runtime_map_init(void) +{ + int i, j, ret = 0; + struct efi_runtime_map_entry *entry; + efi_memory_desc_t *md; + + if (!efi_enabled(EFI_MEMMAP) || !efi_kobj) + return 0; + + map_entries = kcalloc(efi.memmap.nr_map, sizeof(entry), GFP_KERNEL); + if (!map_entries) { + ret = -ENOMEM; + goto out; + } + + i = 0; + for_each_efi_memory_desc(md) { + entry = add_sysfs_runtime_map_entry(efi_kobj, i, md); + if (IS_ERR(entry)) { + ret = PTR_ERR(entry); + goto out_add_entry; + } + *(map_entries + i++) = entry; + } + + return 0; +out_add_entry: + for (j = i - 1; j >= 0; j--) { + entry = *(map_entries + j); + kobject_put(&entry->kobj); + } +out: + return ret; +} +subsys_initcall_sync(efi_runtime_map_init); |
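
The runtime-map.c code above exports one directory per runtime region under
/sys/firmware/efi/runtime-map/ (entries 0, 1, 2, ... with type, phys_addr,
virt_addr, num_pages and attribute files, readable by root only), as described in
Documentation/ABI/testing/sysfs-firmware-efi-runtime-map. A small userspace sketch
for dumping those entries follows; the path layout comes from the kset and kobject
names in the code above, everything else is illustrative.

/* Dump the EFI runtime map exported by runtime-map.c above (run as root). */
#include <stdio.h>

#define RUNTIME_MAP_DIR "/sys/firmware/efi/runtime-map"

static int read_field(int nr, const char *field, unsigned long long *val)
{
	char path[128];
	FILE *f;
	int ok;

	snprintf(path, sizeof(path), RUNTIME_MAP_DIR "/%d/%s", nr, field);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ok = fscanf(f, "%llx", val) == 1;	/* files print "0x..." values */
	fclose(f);
	return ok ? 0 : -1;
}

int main(void)
{
	unsigned long long type, phys, virt, pages;

	for (int nr = 0; ; nr++) {
		if (read_field(nr, "type", &type) ||
		    read_field(nr, "phys_addr", &phys) ||
		    read_field(nr, "virt_addr", &virt) ||
		    read_field(nr, "num_pages", &pages))
			break;	/* entry does not exist: end of map */
		printf("%3d: type 0x%llx phys 0x%llx virt 0x%llx pages 0x%llx\n",
		       nr, type, phys, virt, pages);
	}
	return 0;
}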