author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-27 03:25:06 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-27 03:25:06 +0300
commit		38fabca18fc4c832ea95e2d14fb1ecde8b7dcc56 (patch)
tree		d438aa72c68a11e6715ce07093dc2e30378df3af /arch
parent		8465625ab4700e3e1db506ed8a541f7796356d63 (diff)
parent		29434801e7e9c6d05fbea4533b3c0bd6be612f62 (diff)
download	linux-38fabca18fc4c832ea95e2d14fb1ecde8b7dcc56.tar.xz
Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
 "Two changes:

   - Remove (some) remnants of the vDSO's fake section table mechanism
     that were left behind when the vDSO build process reverted to using
     "objcopy -S" to strip the userspace image.

   - Remove hardcoded POPCNT mnemonics now that the minimum binutils
     version supports the symbolic form"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso: Remove a stale/misleading comment from the linker script
  x86/vdso: Remove obsolete "fake section table" reservation
  x86: Use POPCNT mnemonics in arch_hweight.h
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/entry/vdso/vdso-layout.lds.S	27
-rw-r--r--	arch/x86/entry/vdso/vdso2c.c	8
-rw-r--r--	arch/x86/include/asm/arch_hweight.h	10
3 files changed, 2 insertions(+), 43 deletions(-)
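
The POPCNT part of this pull only swaps hand-encoded opcode bytes for the assembler mnemonic; the generated machine code is identical. A minimal standalone sketch (plain userspace C, not the kernel's ALTERNATIVE()-patched code; it requires a CPU with POPCNT) that can be checked with "objdump -d" to confirm both spellings assemble to the same f3 48 0f b8 c7 sequence:

/* popcnt_encoding_demo.c - build with: gcc -O2 popcnt_encoding_demo.c */
#include <stdio.h>

/* Hand-encoded form the patch removes: popcnt %rdi, %rax */
static __attribute__((noinline)) unsigned long popcnt_bytes(unsigned long w)
{
	unsigned long res;

	asm (".byte 0xf3,0x48,0x0f,0xb8,0xc7" : "=a" (res) : "D" (w));
	return res;
}

/* Symbolic form the assembler now emits directly */
static __attribute__((noinline)) unsigned long popcnt_mnemonic(unsigned long w)
{
	unsigned long res;

	asm ("popcntq %1, %0" : "=a" (res) : "D" (w));
	return res;
}

int main(void)
{
	/* both print 24, and both functions disassemble identically */
	printf("%lu %lu\n", popcnt_bytes(0xdeadbeef), popcnt_mnemonic(0xdeadbeef));
	return 0;
}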
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index acfd5ba7d943..93c6dc7812d0 100644
--- a/arch/x86/entry/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
@@ -7,16 +7,6 @@
* This script controls its layout.
*/
-#if defined(BUILD_VDSO64)
-# define SHDR_SIZE 64
-#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
-# define SHDR_SIZE 40
-#else
-# error unknown VDSO target
-#endif
-
-#define NUM_FAKE_SHDRS 13
-
SECTIONS
{
/*
@@ -60,20 +50,8 @@ SECTIONS
*(.bss*)
*(.dynbss*)
*(.gnu.linkonce.b.*)
-
- /*
- * Ideally this would live in a C file, but that won't
- * work cleanly for x32 until we start building the x32
- * C code using an x32 toolchain.
- */
- VDSO_FAKE_SECTION_TABLE_START = .;
- . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
- VDSO_FAKE_SECTION_TABLE_END = .;
} :text
- .fake_shstrtab : { *(.fake_shstrtab) } :text
-
-
.note : { *(.note.*) } :text :note
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
@@ -87,11 +65,6 @@ SECTIONS
.text : { *(.text*) } :text =0x90909090,
- /*
- * At the end so that eu-elflint stays happy when vdso2c strips
- * these. A better implementation would avoid allocating space
- * for these.
- */
.altinstructions : { *(.altinstructions) } :text
.altinstr_replacement : { *(.altinstr_replacement) } :text
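
For reference, the reservation deleted above set aside NUM_FAKE_SHDRS * SHDR_SIZE bytes of dead space in the image, with SHDR_SIZE matching the ELF section-header size of each target. A small userspace sketch of that arithmetic (the 13 comes from the deleted #define; the header sizes come from <elf.h>):

/* fake_shdr_reservation.c */
#include <elf.h>
#include <stdio.h>

#define NUM_FAKE_SHDRS 13	/* value from the deleted linker-script lines */

int main(void)
{
	printf("64-bit vDSO:     %zu bytes\n",
	       NUM_FAKE_SHDRS * sizeof(Elf64_Shdr));	/* 13 * 64 = 832 */
	printf("32-bit/x32 vDSO: %zu bytes\n",
	       NUM_FAKE_SHDRS * sizeof(Elf32_Shdr));	/* 13 * 40 = 520 */
	return 0;
}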
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 4674f58581a1..8e470b018512 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -76,8 +76,6 @@ enum {
sym_hpet_page,
sym_pvclock_page,
sym_hvclock_page,
- sym_VDSO_FAKE_SECTION_TABLE_START,
- sym_VDSO_FAKE_SECTION_TABLE_END,
};
const int special_pages[] = {
@@ -98,12 +96,6 @@ struct vdso_sym required_syms[] = {
[sym_hpet_page] = {"hpet_page", true},
[sym_pvclock_page] = {"pvclock_page", true},
[sym_hvclock_page] = {"hvclock_page", true},
- [sym_VDSO_FAKE_SECTION_TABLE_START] = {
- "VDSO_FAKE_SECTION_TABLE_START", false
- },
- [sym_VDSO_FAKE_SECTION_TABLE_END] = {
- "VDSO_FAKE_SECTION_TABLE_END", false
- },
{"VDSO32_NOTE_MASK", true},
{"__kernel_vsyscall", true},
{"__kernel_sigreturn", true},
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index 34a10b2d5b73..fc0693569f7a 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -5,15 +5,9 @@
#include <asm/cpufeatures.h>
#ifdef CONFIG_64BIT
-/* popcnt %edi, %eax */
-#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
-/* popcnt %rdi, %rax */
-#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
#define REG_IN "D"
#define REG_OUT "a"
#else
-/* popcnt %eax, %eax */
-#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
#define REG_IN "a"
#define REG_OUT "a"
#endif
@@ -24,7 +18,7 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
- asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
+ asm (ALTERNATIVE("call __sw_hweight32", "popcntl %1, %0", X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
@@ -52,7 +46,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
- asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
+ asm (ALTERNATIVE("call __sw_hweight64", "popcntq %1, %0", X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
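
A userspace analogue of the hweight fallback pattern above: the kernel patches the call site at boot via ALTERNATIVE(), whereas this sketch simply branches on a runtime CPU check (__builtin_cpu_supports is a GCC/clang builtin; this is an illustration, not the kernel's mechanism):

/* hweight_demo.c */
#include <stdio.h>

/* Portable software fallback, analogous to the kernel's __sw_hweight32() */
static unsigned int sw_hweight32(unsigned int w)
{
	unsigned int res = 0;

	while (w) {
		res += w & 1;
		w >>= 1;
	}
	return res;
}

static unsigned int hweight32(unsigned int w)
{
	unsigned int res;

	if (!__builtin_cpu_supports("popcnt"))
		return sw_hweight32(w);

	/* Same mnemonic the patched header now uses */
	asm ("popcntl %1, %0" : "=r" (res) : "r" (w));
	return res;
}

int main(void)
{
	printf("%u\n", hweight32(0xdeadbeef));	/* 24 */
	return 0;
}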