author		Thomas Lendacky <Thomas.Lendacky@amd.com>	2019-06-19 21:40:57 +0300
committer	Borislav Petkov <bp@suse.de>	2019-06-20 10:22:47 +0300
commit		c603a309cc75f3dd018ddb20ee44c05047918cbf (patch)
tree		453384b20d54aeecfb2f5033d06d1fcf18f81b76 /arch/x86
parent		9e0babf2c06c73cda2c0cd37a1653d823adb40ec (diff)
download	linux-c603a309cc75f3dd018ddb20ee44c05047918cbf.tar.xz
x86/mm: Identify the end of the kernel area to be reserved
The memory occupied by the kernel is reserved using memblock_reserve() in
setup_arch(). Currently, the area is from symbols _text to __bss_stop.
Everything after __bss_stop must be specifically reserved otherwise it is
discarded. This is not clearly documented.

Add a new symbol, __end_of_kernel_reserve, that more readily identifies what
is reserved, along with comments that indicate what is reserved, what is
discarded and what needs to be done to prevent a section from being
discarded.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Tested-by: Lianbo Jiang <lijiang@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Pavel Tatashin <pasha.tatashin@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Robert Richter <rrichter@marvell.com>
Cc: Sami Tolvanen <samitolvanen@google.com>
Cc: Sinan Kaya <okaya@codeaurora.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "x86@kernel.org" <x86@kernel.org>
Link: https://lkml.kernel.org/r/7db7da45b435f8477f25e66f292631ff766a844c.1560969363.git.thomas.lendacky@amd.com
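[Editor's note] The "what needs to be done" point can be made concrete with a
short sketch. The in-tree example of the pattern is reserve_brk() in
arch/x86/kernel/setup.c, which reserves the .brk area that vmlinux.lds.S
places after the new __end_of_kernel_reserve marker; the section markers and
function below are hypothetical and exist only to illustrate that pattern:

	#include <linux/init.h>
	#include <linux/memblock.h>	/* memblock_reserve() */
	#include <asm/page.h>		/* __pa_symbol() */

	/* Hypothetical markers for a section placed after __end_of_kernel_reserve. */
	extern char __my_late_start[], __my_late_end[];

	/*
	 * Without a call like this from setup_arch(), the section's memory is
	 * not covered by the automatic reservation and ends up treated as
	 * available memory.
	 */
	static void __init reserve_my_late_section(void)
	{
		memblock_reserve(__pa_symbol(__my_late_start),
				 (unsigned long)__my_late_end -
				 (unsigned long)__my_late_start);
	}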
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/sections.h	2
-rw-r--r--	arch/x86/kernel/setup.c	8
-rw-r--r--	arch/x86/kernel/vmlinux.lds.S	9
3 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 8ea1cfdbeabc..71b32f2570ab 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -13,4 +13,6 @@ extern char __end_rodata_aligned[];
 extern char __end_rodata_hpage_align[];
 #endif
 
+extern char __end_of_kernel_reserve[];
+
 #endif	/* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 08a5f4a131f5..dac60ad37e5e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -827,8 +827,14 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 
 void __init setup_arch(char **cmdline_p)
 {
-	memblock_reserve(__pa_symbol(_text),
-			 (unsigned long)__bss_stop - (unsigned long)_text);
+	/*
+	 * Reserve the memory occupied by the kernel between _text and
+	 * __end_of_kernel_reserve symbols. Any kernel sections after the
+	 * __end_of_kernel_reserve symbol must be explicitly reserved with a
+	 * separate memblock_reserve() or they will be discarded.
+	 */
+	memblock_reserve(__pa_symbol(_text),
+			 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
 
 	/*
 	 * Make sure page 0 is always reserved because on systems with
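[Editor's note] Purely to illustrate the arithmetic in the hunk above, the
range that the updated memblock_reserve() call covers can be written as a
small helper. This is not part of the patch and the function name is made up;
it only restates [_text, __end_of_kernel_reserve) in physical-address terms:

	#include <linux/types.h>
	#include <asm/page.h>		/* __pa_symbol() */
	#include <asm/sections.h>	/* _text, __end_of_kernel_reserve */

	/* True if @pa lies inside the kernel image range that setup_arch()
	 * now reserves automatically. */
	static inline bool pa_in_kernel_reserve(phys_addr_t pa)
	{
		return pa >= __pa_symbol(_text) &&
		       pa < __pa_symbol(__end_of_kernel_reserve);
	}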
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0850b5149345..ca2252ca6ad7 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -368,6 +368,14 @@ SECTIONS
 		__bss_stop = .;
 	}
 
+	/*
+	 * The memory occupied from _text to here, __end_of_kernel_reserve, is
+	 * automatically reserved in setup_arch(). Anything after here must be
+	 * explicitly reserved using memblock_reserve() or it will be discarded
+	 * and treated as available memory.
+	 */
+	__end_of_kernel_reserve = .;
+
 	. = ALIGN(PAGE_SIZE);
 	.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
 		__brk_base = .;
@@ -382,7 +390,6 @@ SECTIONS
 	STABS_DEBUG
 	DWARF_DEBUG
 
-	/* Sections to be discarded */
 	DISCARDS
 	/DISCARD/ : {
 		*(.eh_frame)
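[Editor's note] As a closing illustration of how the new linker-script symbol
is consumed from C: like the other markers declared in <asm/sections.h>,
__end_of_kernel_reserve has no storage of its own, so code only ever takes
its address. A minimal, hypothetical example (not part of this patch):

	#include <linux/init.h>
	#include <linux/printk.h>
	#include <asm/sections.h>	/* extern char __end_of_kernel_reserve[]; */

	/* The array name decays to the address assigned by the
	 * "__end_of_kernel_reserve = .;" statement in vmlinux.lds.S;
	 * %px prints that raw virtual address. */
	static void __init report_end_of_kernel_reserve(void)
	{
		pr_info("kernel reserve ends at %px\n", __end_of_kernel_reserve);
	}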