From 64e1f5872a8c3d80bce4686b4ab5dbc6e6bd30c5 Mon Sep 17 00:00:00 2001
From: Pavel Skripkin
Date: Thu, 6 May 2021 22:07:26 +0300
Subject: x86/alternatives: Make the x86nops[] symbol static

Sparse says:

  arch/x86/kernel/alternative.c:78:21: warning: symbol 'x86nops' was not declared. Should it be static?

Since x86nops[] is not used outside this file, Sparse is right and it
can be made static.

Signed-off-by: Pavel Skripkin
Signed-off-by: Ingo Molnar
Link: https://lore.kernel.org/r/20210506190726.15575-1-paskripkin@gmail.com
---
 arch/x86/kernel/alternative.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6974b5174495..75c752b0628c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -75,7 +75,7 @@ do {						\
 	}						\
 } while (0)
 
-const unsigned char x86nops[] =
+static const unsigned char x86nops[] =
 {
 	BYTES_NOP1,
 	BYTES_NOP2,
--
cgit v1.2.3

From 1bc67873d401e6c2e6e30be7fef21337db07a042 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Wed, 12 May 2021 11:33:10 +0200
Subject: x86/asm: Simplify __smp_mb() definition

Drop the bitness ifdeffery in favor of using _ASM_SP, which is the
helper macro for the rSP register specification for 32 and 64 bit
depending on the build.

No functional changes.

Signed-off-by: Borislav Petkov
Signed-off-by: Ingo Molnar
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210512093310.5635-1-bp@alien8.de
---
 arch/x86/include/asm/barrier.h | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 4819d5e5a335..3ba772a69cc8 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -54,11 +54,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 #define dma_rmb()	barrier()
 #define dma_wmb()	barrier()
 
-#ifdef CONFIG_X86_32
-#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
-#else
-#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
-#endif
+#define __smp_mb()	asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
+
 #define __smp_rmb()	dma_rmb()
 #define __smp_wmb()	barrier()
 #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
--
cgit v1.2.3
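[ Note: _ASM_SP expands to the stack-pointer register name as a string
  literal, so adjacent-string concatenation assembles the right mnemonic
  at preprocessing time. Below is a minimal userspace sketch of that
  idea; MY_ASM_SP and my_smp_mb() are stand-ins invented for this note,
  not the kernel's <asm/asm.h> definitions, and the demo builds only on
  x86:

	/* sketch.c: build with gcc -O2 sketch.c */
	#include <stdio.h>

	/* Stand-in for the kernel's _ASM_SP: pick the stack-pointer
	 * register name at preprocessing time. */
	#ifdef __x86_64__
	#define MY_ASM_SP "rsp"
	#else
	#define MY_ASM_SP "esp"
	#endif

	/* One definition covers both bitnesses: the string literals
	 * concatenate into "lock; addl $0,-4(%rsp)" or "...(%esp)". */
	#define my_smp_mb() \
		asm volatile("lock; addl $0,-4(%%" MY_ASM_SP ")" ::: "memory", "cc")

	int main(void)
	{
		my_smp_mb();	/* a locked RMW on the stack acts as a full barrier */
		puts("barrier executed");
		return 0;
	}

  Building the same file with -m32 emits the esp variant, which is the
  whole point of the cleanup: one macro, no #ifdef. ]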
From c43426334b3169b6c9e6855483aa7384ff09fd33 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 12 May 2021 19:58:31 +0200
Subject: x86: Fix leftover comment typos

Signed-off-by: Ingo Molnar
---
 arch/x86/hyperv/hv_init.c             | 2 +-
 arch/x86/include/asm/sgx.h            | 2 +-
 arch/x86/include/asm/stackprotector.h | 2 +-
 arch/x86/kernel/kprobes/core.c        | 2 +-
 arch/x86/kvm/mmu/mmu.c                | 2 +-
 arch/x86/kvm/mmu/tdp_mmu.c            | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index bb0ae4b5c00f..256ad0e34dd2 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -623,7 +623,7 @@ bool hv_query_ext_cap(u64 cap_query)
 	 * output parameter to the hypercall below and so it should be
 	 * compatible with 'virt_to_phys'. Which means, it's address should be
 	 * directly mapped. Use 'static' to keep it compatible; stack variables
-	 * can be virtually mapped, making them imcompatible with
+	 * can be virtually mapped, making them incompatible with
 	 * 'virt_to_phys'.
 	 * Hypercall input/output addresses should also be 8-byte aligned.
 	 */
diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h
index 9c31e0ebc55b..05f3e21f01a7 100644
--- a/arch/x86/include/asm/sgx.h
+++ b/arch/x86/include/asm/sgx.h
@@ -13,7 +13,7 @@
 /*
  * This file contains both data structures defined by SGX architecture and Linux
  * defined software data structures and functions. The two should not be mixed
- * together for better readibility. The architectural definitions come first.
+ * together for better readability. The architectural definitions come first.
  */
 
 /* The SGX specific CPUID function. */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index b6ffe58c70fa..24a8d6c4fb18 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -11,7 +11,7 @@
 * The same segment is shared by percpu area and stack canary. On
 * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
 * base of percpu area. The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at the approproate
+ * fixed_percpu_data which contains stack_canary at the appropriate
 * offset. On x86_32, the stack canary is just a regular percpu
 * variable.
 *
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index d3d65545cb8b..7c4d0736a998 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -674,7 +674,7 @@ static int prepare_emulation(struct kprobe *p, struct insn *insn)
 			break;
 
 		if (insn->addr_bytes != sizeof(unsigned long))
-			return -EOPNOTSUPP;	/* Don't support differnt size */
+			return -EOPNOTSUPP;	/* Don't support different size */
 		if (X86_MODRM_MOD(opcode) != 3)
 			return -EOPNOTSUPP;	/* TODO: support memory addressing */
 
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0144c40d09c7..5e60b00e8e50 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2374,7 +2374,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
 	 * page is available, while the caller may end up allocating as many as
 	 * four pages, e.g. for PAE roots or for 5-level paging. Temporarily
 	 * exceeding the (arbitrary by default) limit will not harm the host,
-	 * being too agressive may unnecessarily kill the guest, and getting an
+	 * being too aggressive may unnecessarily kill the guest, and getting an
 	 * exact count is far more trouble than it's worth, especially in the
 	 * page fault paths.
 	 */
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 95eeb5ac6a8a..e09cb1f97800 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1017,7 +1017,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
 		if (!is_shadow_present_pte(iter.old_spte)) {
 			/*
-			 * If SPTE has been forzen by another thread, just
+			 * If SPTE has been frozen by another thread, just
 			 * give up and retry, avoiding unnecessary page table
 			 * allocation and free.
 			 */
--
cgit v1.2.3

From 7ee0e638a526b2d1f09c714f86d82dfd7628f322 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Tue, 1 Jun 2021 21:30:56 +0200
Subject: x86/alternative: Align insn bytes vertically

For easier inspection of which bytes have changed.

For example:

  feat: 7*32+12, old: (__x86_indirect_thunk_r10+0x0/0x20 (ffffffff81c02480) len: 17), repl: (ffffffff897813aa, len: 17)
  ffffffff81c02480:   old_insn: 41 ff e2 90 90 90 90 90 90 90 90 90 90 90 90 90 90
  ffffffff897813aa:   rpl_insn: e8 07 00 00 00 f3 90 0f ae e8 eb f9 4c 89 14 24 c3
  ffffffff81c02480: final_insn: e8 07 00 00 00 f3 90 0f ae e8 eb f9 4c 89 14 24 c3

No functional changes.

Signed-off-by: Borislav Petkov
Acked-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20210601193713.16190-1-bp@alien8.de
---
 arch/x86/kernel/alternative.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 75c752b0628c..227c4a8b145a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -273,8 +273,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 			instr, instr, a->instrlen,
 			replacement, a->replacementlen);
 
-		DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
-		DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
+		DUMP_BYTES(instr, a->instrlen, "%px:   old_insn: ", instr);
+		DUMP_BYTES(replacement, a->replacementlen, "%px:   rpl_insn: ", replacement);
 
 		memcpy(insn_buff, replacement, a->replacementlen);
 		insn_buff_sz = a->replacementlen;
--
cgit v1.2.3
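[ Note: the alignment comes purely from padding the tag strings to the
  width of the longest one ("final_insn"). A small standalone
  illustration of the effect; dump_bytes() here is a hypothetical
  printf()-based stand-in, not the kernel's printk-based DUMP_BYTES()
  macro:

	#include <stdio.h>

	/* Right-justify every tag in a 10-character field so the hex
	 * columns start at the same offset on every line. */
	static void dump_bytes(const char *tag, const unsigned char *b, int len)
	{
		int i;

		printf("%10s:", tag);
		for (i = 0; i < len; i++)
			printf(" %02x", b[i]);
		putchar('\n');
	}

	int main(void)
	{
		const unsigned char old[]  = { 0x41, 0xff, 0xe2, 0x90 };
		const unsigned char repl[] = { 0xe8, 0x07, 0x00, 0x00 };

		dump_bytes("old_insn", old, sizeof(old));
		dump_bytes("rpl_insn", repl, sizeof(repl));
		dump_bytes("final_insn", repl, sizeof(repl));
		return 0;
	}

  The byte columns of all three dump lines then line up, which is
  exactly what the two extra spaces in the format strings achieve. ]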
From 1a6a9044b96729abacede172d7591c714a5b81d1 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Tue, 1 Jun 2021 10:53:53 +0300
Subject: x86/setup: Remove CONFIG_X86_RESERVE_LOW and reservelow= options

The CONFIG_X86_RESERVE_LOW build-time option and the reservelow=
command-line option allowed controlling the amount of memory under 1M
that would be reserved at boot to avoid using memory that could
potentially be clobbered by the BIOS.

Since the entire range under 1M is always reserved, there is no need
for these options anymore and they can be removed.

Signed-off-by: Mike Rapoport
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20210601075354.5149-3-rppt@kernel.org
---
 Documentation/admin-guide/kernel-parameters.txt |  5 -----
 arch/x86/Kconfig                                | 29 -------------------------
 arch/x86/kernel/setup.c                         | 24 --------------------
 3 files changed, 58 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index cb89dbdedc46..d7d813032c51 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4775,11 +4775,6 @@
 			Reserves a hole at the top of the kernel virtual
 			address space.
 
-	reservelow=	[X86]
-			Format: nn[K]
-			Set the amount of memory to reserve for BIOS at
-			the bottom of the address space.
-
 	reset_devices	[KNL] Force drivers to reset the underlying device
 			during initialization.
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0045e1b44190..86dae426798b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1693,35 +1693,6 @@
 	  Set whether the default state of memory_corruption_check is
 	  on or off.
 
-config X86_RESERVE_LOW
-	int "Amount of low memory, in kilobytes, to reserve for the BIOS"
-	default 64
-	range 4 640
-	help
-	  Specify the amount of low memory to reserve for the BIOS.
-
-	  The first page contains BIOS data structures that the kernel
-	  must not use, so that page must always be reserved.
-
-	  By default we reserve the first 64K of physical RAM, as a
-	  number of BIOSes are known to corrupt that memory range
-	  during events such as suspend/resume or monitor cable
-	  insertion, so it must not be used by the kernel.
-
-	  You can set this to 4 if you are absolutely sure that you
-	  trust the BIOS to get all its memory reservations and usages
-	  right. If you know your BIOS have problems beyond the
-	  default 64K area, you can set this to 640 to avoid using the
-	  entire low memory range.
-
-	  If you have doubts about the BIOS (e.g. suspend/resume does
-	  not work or there's kernel crashes after certain hardware
-	  hotplug events) then you might want to enable
-	  X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check
-	  typical corruption patterns.
-
-	  Leave this to the default value of 64 if you are unsure.
-
 config MATH_EMULATION
 	bool
 	depends on MODIFY_LDT_SYSCALL
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1e720626069a..7638ac6c3d80 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -695,30 +695,6 @@ static void __init e820_add_kernel_range(void)
 	e820__range_add(start, size, E820_TYPE_RAM);
 }
 
-static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
-
-static int __init parse_reservelow(char *p)
-{
-	unsigned long long size;
-
-	if (!p)
-		return -EINVAL;
-
-	size = memparse(p, &p);
-
-	if (size < 4096)
-		size = 4096;
-
-	if (size > 640*1024)
-		size = 640*1024;
-
-	reserve_low = size;
-
-	return 0;
-}
-
-early_param("reservelow", parse_reservelow);
-
 static void __init early_reserve_memory(void)
 {
 	/*
--
cgit v1.2.3
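[ Note: the removed parser relied on the kernel's memparse() to accept
  a number with a size suffix and then clamped the result to the
  [4K, 640K] range. A rough userspace sketch of that behavior;
  my_memparse() is a simplified stand-in written for this note, not the
  kernel helper:

	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified memparse(): a number with an optional K/M/G
	 * suffix, scaled to bytes. */
	static unsigned long long my_memparse(const char *s)
	{
		char *end;
		unsigned long long v = strtoull(s, &end, 0);

		switch (*end) {
		case 'G': case 'g': v <<= 10; /* fall through */
		case 'M': case 'm': v <<= 10; /* fall through */
		case 'K': case 'k': v <<= 10;
		}
		return v;
	}

	int main(void)
	{
		const char *samples[] = { "2K", "64K", "1M" };

		for (int i = 0; i < 3; i++) {
			unsigned long long size = my_memparse(samples[i]);

			/* the same clamping the removed parse_reservelow() did */
			if (size < 4096)
				size = 4096;
			if (size > 640 * 1024)
				size = 640 * 1024;

			printf("reservelow=%s -> %llu bytes\n", samples[i], size);
		}
		return 0;
	}

  So "reservelow=2K" was silently raised to one page and "reservelow=1M"
  capped at 640K; with the whole low megabyte now reserved
  unconditionally, none of this tuning has any effect left. ]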
From 23721c8e92f73f9f89e7362c50c2996a5c9ad483 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Tue, 1 Jun 2021 10:53:54 +0300
Subject: x86/crash: Remove crash_reserve_low_1M()

The entire memory range under 1M is unconditionally reserved in
setup_arch(), so there is no need for crash_reserve_low_1M() anymore.

Remove this function.

Signed-off-by: Mike Rapoport
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20210601075354.5149-4-rppt@kernel.org
---
 arch/x86/include/asm/crash.h |  6 ------
 arch/x86/kernel/crash.c      | 13 -------------
 2 files changed, 19 deletions(-)

diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index f58de66091e5..8b6bd63530dc 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -9,10 +9,4 @@ int crash_setup_memmap_entries(struct kimage *image,
 		struct boot_params *params);
 void crash_smp_send_stop(void);
 
-#ifdef CONFIG_KEXEC_CORE
-void __init crash_reserve_low_1M(void);
-#else
-static inline void __init crash_reserve_low_1M(void) { }
-#endif
-
 #endif /* _ASM_X86_CRASH_H */
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 54ce999ed321..e8326a8d1c5d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -70,19 +70,6 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
 	rcu_read_unlock();
 }
 
-/*
- * When the crashkernel option is specified, only use the low
- * 1M for the real mode trampoline.
- */
-void __init crash_reserve_low_1M(void)
-{
-	if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
-		return;
-
-	memblock_reserve(0, 1<<20);
-	pr_info("Reserving the low 1M of memory for crashkernel\n");
-}
-
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
--
cgit v1.2.3
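[ Note: this removal and the previous one rest on the same replacement:
  a single unconditional reservation of the low megabyte early in boot.
  A hedged kernel-context sketch of what that boils down to; the
  function name is hypothetical, and the exact placement (the series
  points at early_reserve_memory() in setup.c) is an assumption:

	#include <linux/init.h>
	#include <linux/memblock.h>
	#include <linux/sizes.h>

	/* Hypothetical helper illustrating the generic boot path. */
	static void __init reserve_low_1M_unconditionally(void)
	{
		/* Keep the allocator away from everything below 1M;
		 * BIOSes are known to clobber parts of this range. */
		memblock_reserve(0, SZ_1M);
	}

  With the reservation done unconditionally, neither the crashkernel
  special case nor the reservelow= knob has anything left to decide. ]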
From ec35d1d93bf8976f0668cb1026ea8c7d7bcad3c1 Mon Sep 17 00:00:00 2001
From: Borislav Petkov
Date: Tue, 8 Jun 2021 22:17:10 +0200
Subject: x86/setup: Document that Windows reserves the first MiB

It does so unconditionally too, on Intel and AMD machines, to work
around BIOS bugs, as confirmed by Microsoft folks (see Link for full
details).

Reflow the paragraph, while at it.

Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/MWHPR21MB159330952629D36EEDE706B3D7379@MWHPR21MB1593.namprd21.prod.outlook.com
---
 arch/x86/kernel/setup.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 7638ac6c3d80..85acd22f8022 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1060,17 +1060,18 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	/*
-	 * Find free memory for the real mode trampoline and place it
-	 * there.
-	 * If there is not enough free memory under 1M, on EFI-enabled
-	 * systems there will be additional attempt to reclaim the memory
-	 * for the real mode trampoline at efi_free_boot_services().
+	 * Find free memory for the real mode trampoline and place it there. If
+	 * there is not enough free memory under 1M, on EFI-enabled systems
+	 * there will be additional attempt to reclaim the memory for the real
+	 * mode trampoline at efi_free_boot_services().
 	 *
-	 * Unconditionally reserve the entire first 1M of RAM because
-	 * BIOSes are know to corrupt low memory and several
-	 * hundred kilobytes are not worth complex detection what memory gets
-	 * clobbered. Moreover, on machines with SandyBridge graphics or in
-	 * setups that use crashkernel the entire 1M is reserved anyway.
+	 * Unconditionally reserve the entire first 1M of RAM because BIOSes
+	 * are known to corrupt low memory and several hundred kilobytes are not
+	 * worth complex detection what memory gets clobbered. Windows does the
+	 * same thing for very similar reasons.
+	 *
+	 * Moreover, on machines with SandyBridge graphics or in setups that use
+	 * crashkernel the entire 1M is reserved anyway.
 	 */
 	reserve_real_mode();
--
cgit v1.2.3

From 0e5a89dbb49920cea22193044bbbfd76a9b0f458 Mon Sep 17 00:00:00 2001
From: Hubert Jasudowicz
Date: Wed, 9 Jun 2021 23:51:12 +0200
Subject: doc: Remove references to IBM Calgary

The Calgary IOMMU driver has been removed in

  90dc392fc445 ("x86: Remove the calgary IOMMU driver")

Clean up stale docs that refer to it.

Signed-off-by: Hubert Jasudowicz
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/1bd2b57dd1db53df09e520b8170ff61418805de4.1623274832.git.hubert.jasudowicz@gmail.com
---
 Documentation/x86/x86_64/boot-options.rst | 31 +------------------------------
 1 file changed, 1 insertion(+), 30 deletions(-)

diff --git a/Documentation/x86/x86_64/boot-options.rst b/Documentation/x86/x86_64/boot-options.rst
index 324cefff92e7..5f62b3b86357 100644
--- a/Documentation/x86/x86_64/boot-options.rst
+++ b/Documentation/x86/x86_64/boot-options.rst
@@ -247,16 +247,11 @@ Multiple x86-64 PCI-DMA mapping implementations exist, for example:
      Kernel boot message: "PCI-DMA: Using software bounce buffering
      for IO (SWIOTLB)"
 
-  4. : IBM Calgary hardware IOMMU. Used in IBM
-     pSeries and xSeries servers. This hardware IOMMU supports DMA address
-     mapping with memory protection, etc.
-     Kernel boot message: "PCI-DMA: Using Calgary IOMMU"
-
 ::
 
   iommu=[<size>][,noagp][,off][,force][,noforce]
   [,memaper[=<order>]][,merge][,fullflush][,nomerge]
-  [,noaperture][,calgary]
+  [,noaperture]
 
 General iommu options:
 
@@ -295,8 +290,6 @@ iommu options only relevant to the AMD GART hardware IOMMU:
     Don't initialize the AGP driver and use full aperture.
   panic
     Always panic when IOMMU overflows.
-  calgary
-    Use the Calgary IOMMU if it is available
 
 iommu options only relevant to the software bounce buffering (SWIOTLB) IOMMU
 implementation:
@@ -307,28 +300,6 @@ implementation:
   force
     Force all IO through the software TLB.
 
-Settings for the IBM Calgary hardware IOMMU currently found in IBM
-pSeries and xSeries machines
-
-  calgary=[64k,128k,256k,512k,1M,2M,4M,8M]
-    Set the size of each PCI slot's translation table when using the
-    Calgary IOMMU. This is the size of the translation table itself
-    in main memory. The smallest table, 64k, covers an IO space of
-    32MB; the largest, 8MB table, can cover an IO space of 4GB.
-    Normally the kernel will make the right choice by itself.
-  calgary=[translate_empty_slots]
-    Enable translation even on slots that have no devices attached to
-    them, in case a device will be hotplugged in the future.
-  calgary=[disable=<PCI bus number>]
-    Disable translation on a given PHB. For
-    example, the built-in graphics adapter resides on the first bridge
-    (PCI bus number 0); if translation (isolation) is enabled on this
-    bridge, X servers that access the hardware directly from user
-    space might stop working. Use this option if you have devices that
-    are accessed from userspace directly on some PCI host bridge.
-  panic
-    Always panic when IOMMU overflows
-
 Miscellaneous
 =============
--
cgit v1.2.3
From 1d3156396cf6ea0873145092f4e040374ff1d862 Mon Sep 17 00:00:00 2001
From: ChenXiaoSong
Date: Wed, 9 Jun 2021 11:55:10 +0800
Subject: x86/sgx: Correct kernel-doc's arg name in sgx_encl_release()

Fix the following kernel-doc warning:

  arch/x86/kernel/cpu/sgx/encl.c:392: warning: Function parameter \
    or member 'ref' not described in 'sgx_encl_release'

[ bp: Massage commit message. ]

Signed-off-by: ChenXiaoSong
Signed-off-by: Borislav Petkov
Link: https://lkml.kernel.org/r/20210609035510.2083694-1-chenxiaosong2@huawei.com
---
 arch/x86/kernel/cpu/sgx/encl.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 3be203297988..001808e3901c 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -383,7 +383,7 @@ const struct vm_operations_struct sgx_vm_ops = {
 
 /**
  * sgx_encl_release - Destroy an enclave instance
- * @kref: address of a kref inside &sgx_encl
+ * @ref: address of a kref inside &sgx_encl
 *
 * Used together with kref_put(). Frees all the resources associated with the
 * enclave and the instance itself.
--
cgit v1.2.3
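[ Note: the warning goes away because scripts/kernel-doc matches each
  @name tag in the comment against the actual parameter names in the
  prototype. A minimal hypothetical example of a comment that passes
  the check; widget_release() is an invented function, not kernel API:

	#include <linux/kref.h>

	/**
	 * widget_release - Destroy a widget instance
	 * @ref: address of a kref embedded in the widget
	 *
	 * Used together with kref_put(). The tag name (@ref) must match
	 * the parameter name below, otherwise scripts/kernel-doc warns
	 * that the parameter is "not described".
	 */
	void widget_release(struct kref *ref);

  Renaming either the tag or the parameter, but not both, reproduces
  exactly the warning this patch fixes. ]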