From e3c0f6f3fbdaca7b006dc606def9973233549777 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 23 Jul 2019 16:49:43 +0900 Subject: sh: use __builtin_constant_p() directly instead of IS_IMMEDIATE() __builtin_constant_p(nr) is used everywhere now. It does not make much sense to define IS_IMMEDIATE() as its alias. Signed-off-by: Masahiro Yamada --- arch/sh/include/asm/bitops-op32.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h index 466880362ad1..cfe5465acce7 100644 --- a/arch/sh/include/asm/bitops-op32.h +++ b/arch/sh/include/asm/bitops-op32.h @@ -16,11 +16,9 @@ #define BYTE_OFFSET(nr) ((nr) % BITS_PER_BYTE) #endif -#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr)) - static inline void __set_bit(int nr, volatile unsigned long *addr) { - if (IS_IMMEDIATE(nr)) { + if (__builtin_constant_p(nr)) { __asm__ __volatile__ ( "bset.b %1, @(%O2,%0) ! __set_bit\n\t" : "+r" (addr) @@ -37,7 +35,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr) static inline void __clear_bit(int nr, volatile unsigned long *addr) { - if (IS_IMMEDIATE(nr)) { + if (__builtin_constant_p(nr)) { __asm__ __volatile__ ( "bclr.b %1, @(%O2,%0) ! __clear_bit\n\t" : "+r" (addr) @@ -64,7 +62,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) */ static inline void __change_bit(int nr, volatile unsigned long *addr) { - if (IS_IMMEDIATE(nr)) { + if (__builtin_constant_p(nr)) { __asm__ __volatile__ ( "bxor.b %1, @(%O2,%0) ! __change_bit\n\t" : "+r" (addr) -- cgit v1.2.3 From c9ee4bf9e0f5e353ae1c81ee30a373b6653329d7 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 23 Jul 2019 19:21:06 +0900 Subject: h8300: move definition of __kernel_size_t etc. to posix_types.h These types should be defined in posix_types.h, not in bitsperlong.h . 
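A note on the sh bitops commit above: __builtin_constant_p(nr) folds to 1 at compile time only when nr is a constant expression, which is what lets the compiler select the single-instruction bset.b immediate form. A minimal user-space sketch of the same dispatch idea, using plain-C stand-ins rather than the real SuperH asm (the helper names below are illustrative, not kernel API):

    #include <stdio.h>

    /* Stand-in for the asm path; on SuperH this would be a single
     * "bset.b #imm,@(disp,Rn)" instruction. */
    static inline void set_bit_imm(int nr, unsigned long *addr)
    {
            *addr |= 1UL << nr;
    }

    /* Stand-in for the generic fallback path. */
    static inline void set_bit_generic(int nr, unsigned long *addr)
    {
            addr[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
    }

    static inline void my_set_bit(int nr, unsigned long *addr)
    {
            if (__builtin_constant_p(nr))   /* 1 only for compile-time constants */
                    set_bit_imm(nr, addr);
            else
                    set_bit_generic(nr, addr);
    }

    int main(void)
    {
            unsigned long w = 0;
            int n = 5;

            my_set_bit(3, &w);      /* constant: immediate path */
            my_set_bit(n, &w);      /* variable: generic path */
            printf("%#lx\n", w);    /* prints 0x28 */
            return 0;
    }

Both branches are compiled either way, so both must be valid for any nr; the builtin only steers which one runs, and constant folding removes the dead branch.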
With these defines moved, h8300-specific bitsperlong.h is no longer needed since Kbuild will automatically create a wrapper of include/uapi/asm-generic/bitsperlong.h Signed-off-by: Masahiro Yamada --- arch/h8300/include/uapi/asm/bitsperlong.h | 15 --------------- arch/h8300/include/uapi/asm/posix_types.h | 13 +++++++++++++ 2 files changed, 13 insertions(+), 15 deletions(-) delete mode 100644 arch/h8300/include/uapi/asm/bitsperlong.h create mode 100644 arch/h8300/include/uapi/asm/posix_types.h (limited to 'arch') diff --git a/arch/h8300/include/uapi/asm/bitsperlong.h b/arch/h8300/include/uapi/asm/bitsperlong.h deleted file mode 100644 index a33e358f1c1b..000000000000 --- a/arch/h8300/include/uapi/asm/bitsperlong.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _UAPI__ASM_H8300_BITS_PER_LONG -#define _UAPI__ASM_H8300_BITS_PER_LONG - -#include - -#if !defined(__ASSEMBLY__) -/* h8300-unknown-linux required long */ -#define __kernel_size_t __kernel_size_t -typedef unsigned long __kernel_size_t; -typedef long __kernel_ssize_t; -typedef long __kernel_ptrdiff_t; -#endif - -#endif /* _UAPI__ASM_H8300_BITS_PER_LONG */ diff --git a/arch/h8300/include/uapi/asm/posix_types.h b/arch/h8300/include/uapi/asm/posix_types.h new file mode 100644 index 000000000000..3efc9dd59476 --- /dev/null +++ b/arch/h8300/include/uapi/asm/posix_types.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_POSIX_TYPES_H +#define _UAPI_ASM_POSIX_TYPES_H + +/* h8300-unknown-linux required long */ +#define __kernel_size_t __kernel_size_t +typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; +typedef long __kernel_ptrdiff_t; + +#include + +#endif /* _UAPI_ASM_POSIX_TYPES_H */ -- cgit v1.2.3 From 7d538b5a1ddf79c17d9fd4ce7a5009d11978d350 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 23 Jul 2019 20:30:36 +0900 Subject: sh: remove unneeded uapi asm-generic wrappers These are listed in include/uapi/asm-generic/Kbuild, so Kbuild will automatically generate them. Signed-off-by: Masahiro Yamada --- arch/sh/include/uapi/asm/setup.h | 2 -- arch/sh/include/uapi/asm/types.h | 2 -- 2 files changed, 4 deletions(-) delete mode 100644 arch/sh/include/uapi/asm/setup.h delete mode 100644 arch/sh/include/uapi/asm/types.h (limited to 'arch') diff --git a/arch/sh/include/uapi/asm/setup.h b/arch/sh/include/uapi/asm/setup.h deleted file mode 100644 index 4bd19f80f9b0..000000000000 --- a/arch/sh/include/uapi/asm/setup.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include diff --git a/arch/sh/include/uapi/asm/types.h b/arch/sh/include/uapi/asm/types.h deleted file mode 100644 index 68100e108ea6..000000000000 --- a/arch/sh/include/uapi/asm/types.h +++ /dev/null @@ -1,2 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#include -- cgit v1.2.3 From 418d6e295e4375e1097c511da11795e592aedbeb Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 17:00:50 +0900 Subject: x86: remove unneeded defined(__ASSEMBLY__) check from asm/dwarf2.h This header file has the following check at the top: #ifndef __ASSEMBLY__ #warning "asm/dwarf2.h should be only included in pure assembly files" #endif So, we expect defined(__ASSEMBLY__) is always true. Signed-off-by: Masahiro Yamada Reviewed-by: Jason A. 
Donenfeld Reviewed-by: Nick Desaulniers Acked-by: Ingo Molnar --- arch/x86/include/asm/dwarf2.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index f71a0cce9373..87054cc8272e 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h @@ -36,7 +36,7 @@ #define CFI_SIGNAL_FRAME #endif -#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__) +#if defined(CONFIG_AS_CFI_SECTIONS) #ifndef BUILD_VDSO /* * Emit CFI data in .debug_frame sections, not .eh_frame sections. -- cgit v1.2.3 From 0f2661c4b935e5aa7d309f6c5072d4c49465137f Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 17:00:51 +0900 Subject: x86: remove always-defined CONFIG_AS_CFI CONFIG_AS_CFI was introduced by commit e2414910f212 ("[PATCH] x86: Detect CFI support in the assembler at runtime"), and extended by commit f0f12d85af85 ("x86_64: Check for .cfi_rel_offset in CFI probe"). We raise the minimal supported binutils version from time to time. The last bump was commit 1fb12b35e5ff ("kbuild: Raise the minimum required binutils version to 2.21"). I confirmed the code in $(call as-instr,...) can be assembled by the binutils 2.21 assembler and also by LLVM integrated assembler. Remove CONFIG_AS_CFI, which is always defined. Signed-off-by: Masahiro Yamada Reviewed-by: Jason A. Donenfeld Reviewed-by: Nick Desaulniers Acked-by: Ingo Molnar --- arch/x86/Makefile | 10 ++-------- arch/x86/include/asm/dwarf2.h | 36 ------------------------------------ 2 files changed, 2 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 513a55562d75..72f8f744ebd7 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -177,12 +177,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) endif -# Stackpointer is addressed different for 32 bit and 64 bit x86 -sp-$(CONFIG_X86_32) := esp -sp-$(CONFIG_X86_64) := rsp - -# do binutils support CFI? -cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1) # is .cfi_signal_frame supported too? 
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) @@ -196,8 +190,8 @@ sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI= sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_AFLAGS += $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_CFLAGS += $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index 87054cc8272e..76cc07ff1002 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h @@ -6,15 +6,6 @@ #warning "asm/dwarf2.h should be only included in pure assembly files" #endif -/* - * Macros for dwarf2 CFI unwind table entries. - * See "as.info" for details on these pseudo ops. Unfortunately - * they are only supported in very new binutils, so define them - * away for older version. - */ - -#ifdef CONFIG_AS_CFI - #define CFI_STARTPROC .cfi_startproc #define CFI_ENDPROC .cfi_endproc #define CFI_DEF_CFA .cfi_def_cfa @@ -55,31 +46,4 @@ #endif #endif -#else - -/* - * Due to the structure of pre-exisiting code, don't use assembler line - * comment character # to ignore the arguments. Instead, use a dummy macro. - */ -.macro cfi_ignore a=0, b=0, c=0, d=0 -.endm - -#define CFI_STARTPROC cfi_ignore -#define CFI_ENDPROC cfi_ignore -#define CFI_DEF_CFA cfi_ignore -#define CFI_DEF_CFA_REGISTER cfi_ignore -#define CFI_DEF_CFA_OFFSET cfi_ignore -#define CFI_ADJUST_CFA_OFFSET cfi_ignore -#define CFI_OFFSET cfi_ignore -#define CFI_REL_OFFSET cfi_ignore -#define CFI_REGISTER cfi_ignore -#define CFI_RESTORE cfi_ignore -#define CFI_REMEMBER_STATE cfi_ignore -#define CFI_RESTORE_STATE cfi_ignore -#define CFI_UNDEFINED cfi_ignore -#define CFI_ESCAPE cfi_ignore -#define CFI_SIGNAL_FRAME cfi_ignore - -#endif - #endif /* _ASM_X86_DWARF2_H */ -- cgit v1.2.3 From 46427f658e90eda40b85313b5cff05bdc56c1988 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 17:00:52 +0900 Subject: x86: remove unneeded (CONFIG_AS_)CFI_SIGNAL_FRAME Commit 131484c8da97 ("x86/debug: Remove perpetually broken, unmaintainable dwarf annotations") removes all the users of CFI_SIGNAL_FRAME. Remove the CFI_SIGNAL_FRAME and CONFIG_AS_CFI_SIGNAL_FRAME. Signed-off-by: Masahiro Yamada Reviewed-by: Jason A. Donenfeld Reviewed-by: Nick Desaulniers Acked-by: Ingo Molnar --- arch/x86/Makefile | 6 ++---- arch/x86/include/asm/dwarf2.h | 6 ------ 2 files changed, 2 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 72f8f744ebd7..dd275008fc59 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -177,8 +177,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) endif -# is .cfi_signal_frame supported too? 
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) # does binutils support specific instructions? @@ -190,8 +188,8 @@ sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI= sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) -KBUILD_AFLAGS += $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) -KBUILD_CFLAGS += $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_AFLAGS += $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_CFLAGS += $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index 76cc07ff1002..882db09d5dfb 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h @@ -21,12 +21,6 @@ #define CFI_UNDEFINED .cfi_undefined #define CFI_ESCAPE .cfi_escape -#ifdef CONFIG_AS_CFI_SIGNAL_FRAME -#define CFI_SIGNAL_FRAME .cfi_signal_frame -#else -#define CFI_SIGNAL_FRAME -#endif - #if defined(CONFIG_AS_CFI_SECTIONS) #ifndef BUILD_VDSO /* -- cgit v1.2.3 From 48e24723d0b54a2c9e53dd96c8864cc0afc7b0ee Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 17:00:53 +0900 Subject: x86: remove always-defined CONFIG_AS_CFI_SECTIONS CONFIG_AS_CFI_SECTIONS was introduced by commit 9e565292270a ("x86: Use .cfi_sections for assembly code"). We raise the minimal supported binutils version from time to time. The last bump was commit 1fb12b35e5ff ("kbuild: Raise the minimum required binutils version to 2.21"). I confirmed the code in $(call as-instr,...) can be assembled by the binutils 2.21 assembler and also by LLVM integrated assembler. Remove CONFIG_AS_CFI_SECTIONS, which is always defined. Signed-off-by: Masahiro Yamada Reviewed-by: Jason A. Donenfeld Reviewed-by: Nick Desaulniers Acked-by: Ingo Molnar --- arch/x86/Makefile | 6 ++---- arch/x86/include/asm/dwarf2.h | 2 -- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index dd275008fc59..e4a062313bb0 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -177,8 +177,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) endif -cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1) - # does binutils support specific instructions? 
asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) @@ -188,8 +186,8 @@ sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI= sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) -KBUILD_AFLAGS += $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) -KBUILD_CFLAGS += $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h index 882db09d5dfb..430fca13bb56 100644 --- a/arch/x86/include/asm/dwarf2.h +++ b/arch/x86/include/asm/dwarf2.h @@ -21,7 +21,6 @@ #define CFI_UNDEFINED .cfi_undefined #define CFI_ESCAPE .cfi_escape -#if defined(CONFIG_AS_CFI_SECTIONS) #ifndef BUILD_VDSO /* * Emit CFI data in .debug_frame sections, not .eh_frame sections. @@ -38,6 +37,5 @@ */ .cfi_sections .eh_frame, .debug_frame #endif -#endif #endif /* _ASM_X86_DWARF2_H */ -- cgit v1.2.3 From 92203b02805d99d8aca88b1c6b93c721237205fe Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 17:00:54 +0900 Subject: x86: remove always-defined CONFIG_AS_SSSE3 CONFIG_AS_SSSE3 was introduced by commit 75aaf4c3e6a4 ("x86/raid6: correctly check for assembler capabilities"). We raise the minimal supported binutils version from time to time. The last bump was commit 1fb12b35e5ff ("kbuild: Raise the minimum required binutils version to 2.21"). I confirmed the code in $(call as-instr,...) can be assembled by the binutils 2.21 assembler and also by LLVM integrated assembler. Remove CONFIG_AS_SSSE3, which is always defined. I added ifdef CONFIG_X86 to lib/raid6/algos.c to avoid link errors on non-x86 architectures. lib/raid6/algos.c is built not only for the kernel but also for testing the library code from userspace. I added -DCONFIG_X86 to lib/raid6/test/Makefile to cater to this use case. Signed-off-by: Masahiro Yamada Reviewed-by: Jason A. Donenfeld Reviewed-by: Nick Desaulniers Acked-by: Ingo Molnar --- arch/x86/Makefile | 5 ++--- arch/x86/crypto/blake2s-core.S | 2 -- lib/raid6/algos.c | 2 +- lib/raid6/recov_ssse3.c | 6 ------ lib/raid6/test/Makefile | 4 +--- 5 files changed, 4 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index e4a062313bb0..94f89612e024 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -178,7 +178,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) endif # does binutils support specific instructions? 
-asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1) avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) @@ -186,8 +185,8 @@ sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI= sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) -KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) -KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_AFLAGS += $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_CFLAGS += $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S index 24910b766bdd..2ca79974f819 100644 --- a/arch/x86/crypto/blake2s-core.S +++ b/arch/x86/crypto/blake2s-core.S @@ -46,7 +46,6 @@ SIGMA2: #endif /* CONFIG_AS_AVX512 */ .text -#ifdef CONFIG_AS_SSSE3 SYM_FUNC_START(blake2s_compress_ssse3) testq %rdx,%rdx je .Lendofloop @@ -174,7 +173,6 @@ SYM_FUNC_START(blake2s_compress_ssse3) .Lendofloop: ret SYM_FUNC_END(blake2s_compress_ssse3) -#endif /* CONFIG_AS_SSSE3 */ #ifdef CONFIG_AS_AVX512 SYM_FUNC_START(blake2s_compress_avx512) diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index bf1b4765c8f6..df08664d3432 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -97,13 +97,13 @@ void (*raid6_datap_recov)(int, size_t, int, void **); EXPORT_SYMBOL_GPL(raid6_datap_recov); const struct raid6_recov_calls *const raid6_recov_algos[] = { +#ifdef CONFIG_X86 #ifdef CONFIG_AS_AVX512 &raid6_recov_avx512, #endif #ifdef CONFIG_AS_AVX2 &raid6_recov_avx2, #endif -#ifdef CONFIG_AS_SSSE3 &raid6_recov_ssse3, #endif #ifdef CONFIG_S390 diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c index 1de97d2405d0..4bfa3c6b60de 100644 --- a/lib/raid6/recov_ssse3.c +++ b/lib/raid6/recov_ssse3.c @@ -3,8 +3,6 @@ * Copyright (C) 2012 Intel Corporation */ -#ifdef CONFIG_AS_SSSE3 - #include #include "x86.h" @@ -328,7 +326,3 @@ const struct raid6_recov_calls raid6_recov_ssse3 = { #endif .priority = 1, }; - -#else -#warning "your version of binutils lacks SSSE3 support" -#endif diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index b9e6c3648be1..60021319ac78 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -34,9 +34,7 @@ endif ifeq ($(IS_X86),yes) OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o - CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \ - gcc -c -x assembler - >/dev/null 2>&1 && \ - rm ./-.o && echo -DCONFIG_AS_SSSE3=1) + CFLAGS += -DCONFIG_X86 CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \ gcc -c -x assembler - >/dev/null 2>&1 && \ rm ./-.o && echo -DCONFIG_AS_AVX2=1) -- cgit v1.2.3 From 42251572c4687813d8e7b1d363a23d0f9201e69f Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 17:00:55 +0900 Subject: x86: remove always-defined CONFIG_AS_AVX CONFIG_AS_AVX was introduced by commit ea4d26ae24e5 ("raid5: add AVX optimized RAID5 checksumming"). We raise the minimal supported binutils version from time to time. 
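A note on the lib/raid6/algos.c hunk above: the boot-time selection walks a NULL-terminated table of candidate implementations, and the patch merely swaps a per-assembler guard (CONFIG_AS_SSSE3) for an arch guard (CONFIG_X86). A compilable sketch of that table pattern, with made-up entries standing in for the real raid6_recov_* structs:

    #include <stddef.h>
    #include <stdio.h>

    struct recov_calls {
            const char *name;
            int priority;
    };

    /* Hypothetical entries; the real ones carry function pointers. */
    static const struct recov_calls recov_ssse3 = { "ssse3", 1 };
    static const struct recov_calls recov_intx1 = { "intx1", 0 };

    static const struct recov_calls *const recov_algos[] = {
    #ifdef CONFIG_X86       /* arch guard kept; CONFIG_AS_SSSE3 guard dropped */
            &recov_ssse3,
    #endif
            &recov_intx1,   /* generic fallback, always present */
            NULL,
    };

    int main(void)
    {
            /* Roughly what the raid6 boot-time selection does: take the
             * highest-priority entry (the real code also runtime-checks
             * CPU features before accepting one). */
            const struct recov_calls *best = NULL;

            for (const struct recov_calls *const *a = recov_algos; *a; a++)
                    if (!best || (*a)->priority > best->priority)
                            best = *a;
            printf("using %s\n", best->name);
            return 0;
    }

Built with -DCONFIG_X86 this prints "using ssse3"; without it only the generic fallback remains in the table, which is exactly why the patch needs the arch guard to avoid link errors on non-x86 builds.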
The last bump was commit 1fb12b35e5ff ("kbuild: Raise the minimum required binutils version to 2.21"). I confirmed the code in $(call as-instr,...) can be assembled by the binutils 2.21 assembler and also by LLVM integrated assembler. Remove CONFIG_AS_AVX, which is always defined. Signed-off-by: Masahiro Yamada Reviewed-by: Jason A. Donenfeld Acked-by: Ingo Molnar --- arch/x86/Makefile | 5 ++--- arch/x86/crypto/Makefile | 32 ++++++++++----------------- arch/x86/crypto/aesni-intel_avx-x86_64.S | 3 --- arch/x86/crypto/aesni-intel_glue.c | 14 +----------- arch/x86/crypto/poly1305-x86_64-cryptogams.pl | 8 ------- arch/x86/crypto/poly1305_glue.c | 6 ++--- arch/x86/crypto/sha1_ssse3_asm.S | 4 ---- arch/x86/crypto/sha1_ssse3_glue.c | 9 +------- arch/x86/crypto/sha256-avx-asm.S | 3 --- arch/x86/crypto/sha256_ssse3_glue.c | 8 +------ arch/x86/crypto/sha512-avx-asm.S | 2 -- arch/x86/crypto/sha512_ssse3_glue.c | 7 +----- arch/x86/include/asm/xor_avx.h | 9 -------- 13 files changed, 21 insertions(+), 89 deletions(-) (limited to 'arch') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 94f89612e024..f32ef7b8d5ca 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -178,15 +178,14 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) endif # does binutils support specific instructions? -avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1) avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1) sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) -KBUILD_AFLAGS += $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) -KBUILD_CFLAGS += $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_AFLAGS += $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) +KBUILD_CFLAGS += $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 8c2e9eadee8a..1a044908d42d 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -5,7 +5,6 @@ OBJECT_FILES_NON_STANDARD := y -avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no) avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ $(comma)4)$(comma)%ymm2,yes,no) avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no) @@ -47,15 +46,12 @@ ifeq ($(adx_supported),yes) endif # These modules require assembler to support AVX. 
-ifeq ($(avx_supported),yes) - obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += \ - camellia-aesni-avx-x86_64.o - obj-$(CONFIG_CRYPTO_CAST5_AVX_X86_64) += cast5-avx-x86_64.o - obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o - obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o - obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o - obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o -endif +obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += camellia-aesni-avx-x86_64.o +obj-$(CONFIG_CRYPTO_CAST5_AVX_X86_64) += cast5-avx-x86_64.o +obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o +obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o +obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o +obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o # These modules require assembler to support AVX2. ifeq ($(avx2_supported),yes) @@ -83,16 +79,12 @@ ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),) targets += poly1305-x86_64-cryptogams.S endif -ifeq ($(avx_supported),yes) - camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \ - camellia_aesni_avx_glue.o - cast5-avx-x86_64-y := cast5-avx-x86_64-asm_64.o cast5_avx_glue.o - cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o - twofish-avx-x86_64-y := twofish-avx-x86_64-asm_64.o \ - twofish_avx_glue.o - serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o \ - serpent_avx_glue.o -endif +camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \ + camellia_aesni_avx_glue.o +cast5-avx-x86_64-y := cast5-avx-x86_64-asm_64.o cast5_avx_glue.o +cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o +twofish-avx-x86_64-y := twofish-avx-x86_64-asm_64.o twofish_avx_glue.o +serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o ifeq ($(avx2_supported),yes) camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index bfa1c0b3e5b4..cc56ee43238b 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -886,7 +886,6 @@ _less_than_8_bytes_left_\@: _partial_block_done_\@: .endm # PARTIAL_BLOCK -#ifdef CONFIG_AS_AVX ############################################################################### # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) # Input: A and B (128-bits each, bit-reflected) @@ -1869,8 +1868,6 @@ key_256_finalize: ret SYM_FUNC_END(aesni_gcm_finalize_avx_gen2) -#endif /* CONFIG_AS_AVX */ - #ifdef CONFIG_AS_AVX2 ############################################################################### # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 75b6ea20491e..655ad6bc8810 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -185,7 +185,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = { .finalize = &aesni_gcm_finalize, }; -#ifdef CONFIG_AS_AVX asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv, void *keys, u8 *out, unsigned int num_bytes); asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv, @@ -234,8 +233,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { .finalize = &aesni_gcm_finalize_avx_gen2, }; -#endif - #ifdef CONFIG_AS_AVX2 /* * asmlinkage void aesni_gcm_init_avx_gen4() @@ -476,7 +473,6 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx, crypto_inc(ctrblk, 
AES_BLOCK_SIZE); } -#ifdef CONFIG_AS_AVX static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv) { @@ -493,7 +489,6 @@ static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, else aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len); } -#endif static int ctr_crypt(struct skcipher_request *req) { @@ -715,10 +710,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4) gcm_tfm = &aesni_gcm_tfm_avx_gen2; #endif -#ifdef CONFIG_AS_AVX if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2) gcm_tfm = &aesni_gcm_tfm_sse; -#endif /* Linearize assoc, if not already linear */ if (req->src->length >= assoclen && req->src->length && @@ -1082,24 +1075,19 @@ static int __init aesni_init(void) aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4; } else #endif -#ifdef CONFIG_AS_AVX if (boot_cpu_has(X86_FEATURE_AVX)) { pr_info("AVX version of gcm_enc/dec engaged.\n"); aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2; - } else -#endif - { + } else { pr_info("SSE version of gcm_enc/dec engaged.\n"); aesni_gcm_tfm = &aesni_gcm_tfm_sse; } aesni_ctr_enc_tfm = aesni_ctr_enc; -#ifdef CONFIG_AS_AVX if (boot_cpu_has(X86_FEATURE_AVX)) { /* optimize performance of ctr mode encryption transform */ aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; pr_info("AES CTR mode by8 optimization enabled\n"); } -#endif #endif err = crypto_register_alg(&aesni_cipher_alg); diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl index 7a6b5380a46f..5bac2d533104 100644 --- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl +++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl @@ -404,10 +404,6 @@ ___ &end_function("poly1305_emit_x86_64"); if ($avx) { -if($kernel) { - $code .= "#ifdef CONFIG_AS_AVX\n"; -} - ######################################################################## # Layout of opaque area is following. 
# @@ -1516,10 +1512,6 @@ $code.=<<___; ___ &end_function("poly1305_emit_avx"); -if ($kernel) { - $code .= "#endif\n"; -} - if ($avx>1) { if ($kernel) { diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index 79bb58737d52..4a6226e1d15e 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -94,7 +94,7 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len, BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || PAGE_SIZE % POLY1305_BLOCK_SIZE); - if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) || + if (!static_branch_likely(&poly1305_use_avx) || (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) || !crypto_simd_usable()) { convert_to_base2_64(ctx); @@ -123,7 +123,7 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len, static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], const u32 nonce[4]) { - if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx)) + if (!static_branch_likely(&poly1305_use_avx)) poly1305_emit_x86_64(ctx, mac, nonce); else poly1305_emit_avx(ctx, mac, nonce); @@ -261,7 +261,7 @@ static struct shash_alg alg = { static int __init poly1305_simd_mod_init(void) { - if (IS_ENABLED(CONFIG_AS_AVX) && boot_cpu_has(X86_FEATURE_AVX) && + if (boot_cpu_has(X86_FEATURE_AVX) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) static_branch_enable(&poly1305_use_avx); if (IS_ENABLED(CONFIG_AS_AVX2) && boot_cpu_has(X86_FEATURE_AVX) && diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S index 12e2d19d7402..d25668d2a1e9 100644 --- a/arch/x86/crypto/sha1_ssse3_asm.S +++ b/arch/x86/crypto/sha1_ssse3_asm.S @@ -467,8 +467,6 @@ W_PRECALC_SSSE3 */ SHA1_VECTOR_ASM sha1_transform_ssse3 -#ifdef CONFIG_AS_AVX - .macro W_PRECALC_AVX .purgem W_PRECALC_00_15 @@ -553,5 +551,3 @@ W_PRECALC_AVX * const u8 *data, int blocks); */ SHA1_VECTOR_ASM sha1_transform_avx - -#endif diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index d70b40ad594c..275b65dd30c9 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -114,7 +114,6 @@ static void unregister_sha1_ssse3(void) crypto_unregister_shash(&sha1_ssse3_alg); } -#ifdef CONFIG_AS_AVX asmlinkage void sha1_transform_avx(struct sha1_state *state, const u8 *data, int blocks); @@ -175,13 +174,7 @@ static void unregister_sha1_avx(void) crypto_unregister_shash(&sha1_avx_alg); } -#else /* CONFIG_AS_AVX */ -static inline int register_sha1_avx(void) { return 0; } -static inline void unregister_sha1_avx(void) { } -#endif /* CONFIG_AS_AVX */ - - -#if defined(CONFIG_AS_AVX2) && (CONFIG_AS_AVX) +#if defined(CONFIG_AS_AVX2) #define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */ asmlinkage void sha1_transform_avx2(struct sha1_state *state, diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S index fcbc30f58c38..4739cd31b9db 100644 --- a/arch/x86/crypto/sha256-avx-asm.S +++ b/arch/x86/crypto/sha256-avx-asm.S @@ -47,7 +47,6 @@ # This code schedules 1 block at a time, with 4 lanes per block ######################################################################## -#ifdef CONFIG_AS_AVX #include ## assume buffers not aligned @@ -498,5 +497,3 @@ _SHUF_00BA: # shuffle xDxC -> DC00 _SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF - -#endif diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 03ad657c04bd..8bdc3be31f64 100644 --- 
a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -144,7 +144,6 @@ static void unregister_sha256_ssse3(void) ARRAY_SIZE(sha256_ssse3_algs)); } -#ifdef CONFIG_AS_AVX asmlinkage void sha256_transform_avx(struct sha256_state *state, const u8 *data, int blocks); @@ -221,12 +220,7 @@ static void unregister_sha256_avx(void) ARRAY_SIZE(sha256_avx_algs)); } -#else -static inline int register_sha256_avx(void) { return 0; } -static inline void unregister_sha256_avx(void) { } -#endif - -#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) +#if defined(CONFIG_AS_AVX2) asmlinkage void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks); diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S index 90ea945ba5e6..63470fd6ae32 100644 --- a/arch/x86/crypto/sha512-avx-asm.S +++ b/arch/x86/crypto/sha512-avx-asm.S @@ -47,7 +47,6 @@ # ######################################################################## -#ifdef CONFIG_AS_AVX #include .text @@ -424,4 +423,3 @@ K512: .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817 -#endif diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index 1c444f41037c..75214982a633 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -142,7 +142,6 @@ static void unregister_sha512_ssse3(void) ARRAY_SIZE(sha512_ssse3_algs)); } -#ifdef CONFIG_AS_AVX asmlinkage void sha512_transform_avx(struct sha512_state *state, const u8 *data, int blocks); static bool avx_usable(void) @@ -218,12 +217,8 @@ static void unregister_sha512_avx(void) crypto_unregister_shashes(sha512_avx_algs, ARRAY_SIZE(sha512_avx_algs)); } -#else -static inline int register_sha512_avx(void) { return 0; } -static inline void unregister_sha512_avx(void) { } -#endif -#if defined(CONFIG_AS_AVX2) && defined(CONFIG_AS_AVX) +#if defined(CONFIG_AS_AVX2) asmlinkage void sha512_transform_rorx(struct sha512_state *state, const u8 *data, int blocks); diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h index d61ddf3d052b..0c4e5b5e3852 100644 --- a/arch/x86/include/asm/xor_avx.h +++ b/arch/x86/include/asm/xor_avx.h @@ -11,8 +11,6 @@ * Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines */ -#ifdef CONFIG_AS_AVX - #include #include @@ -170,11 +168,4 @@ do { \ #define AVX_SELECT(FASTEST) \ (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST) -#else - -#define AVX_XOR_SPEED {} - -#define AVX_SELECT(FASTEST) (FASTEST) - -#endif #endif -- cgit v1.2.3 From 5e8ebd841a44b895e2bab5e874ff7d333ca31135 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Thu, 26 Mar 2020 17:00:58 +0900 Subject: x86: probe assembler capabilities via kconfig instead of makefile Doing this probing inside of the Makefiles means we have a maze of ifdefs inside the source code and child Makefiles that need to make proper decisions on this too. Instead, we do it at Kconfig time, like many other compiler and assembler options, which allows us to set up the dependencies normally for full compilation units. In the process, the ADX test changes to use %eax instead of %r10 so that it's valid in both 32-bit and 64-bit mode. Signed-off-by: Jason A. 
Donenfeld Acked-by: Ingo Molnar Reviewed-by: Nick Desaulniers Signed-off-by: Masahiro Yamada --- arch/x86/Kconfig | 2 ++ arch/x86/Kconfig.assembler | 17 +++++++++++++++++ arch/x86/Makefile | 10 ---------- net/netfilter/Makefile | 2 +- 4 files changed, 20 insertions(+), 11 deletions(-) create mode 100644 arch/x86/Kconfig.assembler (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8d078642b4be..70898a68fa1a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2931,3 +2931,5 @@ config HAVE_ATOMIC_IOMAP source "drivers/firmware/Kconfig" source "arch/x86/kvm/Kconfig" + +source "arch/x86/Kconfig.assembler" diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler new file mode 100644 index 000000000000..91230bf11a14 --- /dev/null +++ b/arch/x86/Kconfig.assembler @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020 Jason A. Donenfeld . All Rights Reserved. + +config AS_AVX2 + def_bool $(as-instr,vpbroadcastb %xmm0$(comma)%ymm1) + +config AS_AVX512 + def_bool $(as-instr,vpmovm2b %k1$(comma)%zmm5) + +config AS_SHA1_NI + def_bool $(as-instr,sha1msg1 %xmm0$(comma)%xmm1) + +config AS_SHA256_NI + def_bool $(as-instr,sha256msg1 %xmm0$(comma)%xmm1) + +config AS_ADX + def_bool $(as-instr,adox %eax$(comma)%eax) diff --git a/arch/x86/Makefile b/arch/x86/Makefile index f32ef7b8d5ca..b65ec63c7db7 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -177,16 +177,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1) KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,) endif -# does binutils support specific instructions? -avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1) -avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) -sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1) -sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) -adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) - -KBUILD_AFLAGS += $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) -KBUILD_CFLAGS += $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) - KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) # diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 292e71dc7ba4..ed4e52270520 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -83,7 +83,7 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \ nft_set_pipapo.o ifdef CONFIG_X86_64 -ifneq (,$(findstring -DCONFIG_AS_AVX2=1,$(KBUILD_CFLAGS))) +ifdef CONFIG_AS_AVX2 nf_tables-objs += nft_set_pipapo_avx2.o endif endif -- cgit v1.2.3 From e9e070cfe141e73588f1e48ca3f51d1183b8b646 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 17:00:59 +0900 Subject: x86: add comments about the binutils version to support code in as-instr We raise the minimal supported binutils version from time to time. The last bump was commit 1fb12b35e5ff ("kbuild: Raise the minimum required binutils version to 2.21"). We have these as-instr tests because binutils 2.21 does not support them. When we bump the binutils version next time, this will be a good hint to find out which one can be dropped. As for the Clang/LLVM builds, we require very new LLVM version, so the LLVM integrated assembler supports all of them. Signed-off-by: Masahiro Yamada Acked-by: Jason A. 
Donenfeld Acked-by: Ingo Molnar Acked-by: Nick Desaulniers --- arch/x86/Kconfig.assembler | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler index 91230bf11a14..a5a1d2766b3a 100644 --- a/arch/x86/Kconfig.assembler +++ b/arch/x86/Kconfig.assembler @@ -3,15 +3,25 @@ config AS_AVX2 def_bool $(as-instr,vpbroadcastb %xmm0$(comma)%ymm1) + help + Supported by binutils >= 2.22 and LLVM integrated assembler config AS_AVX512 def_bool $(as-instr,vpmovm2b %k1$(comma)%zmm5) + help + Supported by binutils >= 2.25 and LLVM integrated assembler config AS_SHA1_NI def_bool $(as-instr,sha1msg1 %xmm0$(comma)%xmm1) + help + Supported by binutils >= 2.24 and LLVM integrated assembler config AS_SHA256_NI def_bool $(as-instr,sha256msg1 %xmm0$(comma)%xmm1) + help + Supported by binutils >= 2.24 and LLVM integrated assembler config AS_ADX def_bool $(as-instr,adox %eax$(comma)%eax) + help + Supported by binutils >= 2.23 and LLVM integrated assembler -- cgit v1.2.3 From 4dcbfc35f7da66609cf56690586a72ca0ad279f8 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Thu, 26 Mar 2020 17:01:00 +0900 Subject: crypto: x86 - rework configuration based on Kconfig Now that assembler capabilities are probed inside of Kconfig, we can set up proper Kconfig-based dependencies. We also take this opportunity to reorder the Makefile, so that items are grouped logically by primitive. Signed-off-by: Jason A. Donenfeld Acked-by: Herbert Xu Acked-by: Ingo Molnar Signed-off-by: Masahiro Yamada --- arch/x86/crypto/Makefile | 152 ++++++++++++++++++++--------------------------- crypto/Kconfig | 8 +-- 2 files changed, 69 insertions(+), 91 deletions(-) (limited to 'arch') diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 1a044908d42d..2f23f08fdd4b 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -1,122 +1,100 @@ # SPDX-License-Identifier: GPL-2.0 # -# Arch-specific CryptoAPI modules. 
-# +# x86 crypto algorithms OBJECT_FILES_NON_STANDARD := y -avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\ - $(comma)4)$(comma)%ymm2,yes,no) -avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no) -sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no) -sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no) -adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no) - obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o +twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o +obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o +twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o +obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o +twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o +obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o +twofish-avx-x86_64-y := twofish-avx-x86_64-asm_64.o twofish_avx_glue.o + obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o +serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o +obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o +serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o +obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o +serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o serpent_avx_glue.o +obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o +serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o +des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o + obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o +camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o +obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += camellia-aesni-avx-x86_64.o +camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o camellia_aesni_avx_glue.o +obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o +camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o + obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o -obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o -obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o +blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o + +obj-$(CONFIG_CRYPTO_CAST5_AVX_X86_64) += cast5-avx-x86_64.o +cast5-avx-x86_64-y := cast5-avx-x86_64-asm_64.o cast5_avx_glue.o + +obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o +cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o + +obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o +aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o + obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o -obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o +chacha-x86_64-y := chacha-ssse3-x86_64.o chacha_glue.o +chacha-x86_64-$(CONFIG_AS_AVX2) += chacha-avx2-x86_64.o +chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o + obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o -obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o +aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o +aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o -obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o -obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o -obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o -obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += 
sha512-ssse3.o -obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o -obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o - -obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o +sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o +sha1-ssse3-$(CONFIG_AS_AVX2) += sha1_avx2_x86_64_asm.o +sha1-ssse3-$(CONFIG_AS_SHA1_NI) += sha1_ni_asm.o -obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o -obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o +obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o +sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o +sha256-ssse3-$(CONFIG_AS_SHA256_NI) += sha256_ni_asm.o -# These modules require the assembler to support ADX. -ifeq ($(adx_supported),yes) - obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o -endif +obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o +sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o -# These modules require assembler to support AVX. -obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += camellia-aesni-avx-x86_64.o -obj-$(CONFIG_CRYPTO_CAST5_AVX_X86_64) += cast5-avx-x86_64.o -obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o -obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o -obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o +blake2s-x86_64-y := blake2s-core.o blake2s-glue.o -# These modules require assembler to support AVX2. -ifeq ($(avx2_supported),yes) - obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o - obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o -endif +obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o +ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o -twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o -serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o +obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o +crc32c-intel-y := crc32c-intel_glue.o +crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o -des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o -camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o -blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o -twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o -twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o -chacha-x86_64-y := chacha-ssse3-x86_64.o chacha_glue.o -serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o +obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o +crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o -aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o +obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o +crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o -nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o -blake2s-x86_64-y := blake2s-core.o blake2s-glue.o +obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),) targets += poly1305-x86_64-cryptogams.S endif -camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \ - camellia_aesni_avx_glue.o -cast5-avx-x86_64-y := cast5-avx-x86_64-asm_64.o cast5_avx_glue.o -cast6-avx-x86_64-y := cast6-avx-x86_64-asm_64.o cast6_avx_glue.o -twofish-avx-x86_64-y := twofish-avx-x86_64-asm_64.o twofish_avx_glue.o -serpent-avx-x86_64-y := serpent-avx-x86_64-asm_64.o 
serpent_avx_glue.o - -ifeq ($(avx2_supported),yes) - camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o - chacha-x86_64-y += chacha-avx2-x86_64.o - serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o - - nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o -endif - -ifeq ($(avx512_supported),yes) - chacha-x86_64-y += chacha-avx512vl-x86_64.o -endif +obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o +nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o +obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o +nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o -aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o -aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o -ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o -sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o -ifeq ($(avx2_supported),yes) -sha1-ssse3-y += sha1_avx2_x86_64_asm.o -endif -ifeq ($(sha1_ni_supported),yes) -sha1-ssse3-y += sha1_ni_asm.o -endif -crc32c-intel-y := crc32c-intel_glue.o -crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o -crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o -sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o -ifeq ($(sha256_ni_supported),yes) -sha256-ssse3-y += sha256_ni_asm.o -endif -sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o -crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o +obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o quiet_cmd_perlasm = PERLASM $@ cmd_perlasm = $(PERL) $< > $@ diff --git a/crypto/Kconfig b/crypto/Kconfig index c24a47406f8f..49aae167e75c 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -267,7 +267,7 @@ config CRYPTO_CURVE25519 config CRYPTO_CURVE25519_X86 tristate "x86_64 accelerated Curve25519 scalar multiplication library" - depends on X86 && 64BIT + depends on X86 && 64BIT && AS_ADX select CRYPTO_LIB_CURVE25519_GENERIC select CRYPTO_ARCH_HAVE_LIB_CURVE25519 @@ -465,7 +465,7 @@ config CRYPTO_NHPOLY1305_SSE2 config CRYPTO_NHPOLY1305_AVX2 tristate "NHPoly1305 hash function (x86_64 AVX2 implementation)" - depends on X86 && 64BIT + depends on X86 && 64BIT && AS_AVX2 select CRYPTO_NHPOLY1305 help AVX2 optimized implementation of the hash function used by the @@ -1303,7 +1303,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)" - depends on X86 && 64BIT + depends on X86 && 64BIT && AS_AVX2 depends on CRYPTO select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 help @@ -1573,7 +1573,7 @@ config CRYPTO_SERPENT_AVX_X86_64 config CRYPTO_SERPENT_AVX2_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX2)" - depends on X86 && 64BIT + depends on X86 && 64BIT && AS_AVX2 select CRYPTO_SERPENT_AVX_X86_64 help Serpent cipher algorithm, by Anderson, Biham & Knudsen. -- cgit v1.2.3 From d7e40ea83eb9155bd1d6bbbb48ffb843a8f56120 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 26 Mar 2020 17:01:04 +0900 Subject: crypto: x86 - clean up poly1305-x86_64-cryptogams.S by 'make clean' poly1305-x86_64-cryptogams.S is a generated file, so it should be cleaned up by 'make clean'. Assigning it to the variable 'targets' teaches Kbuild that it is a generated file. However, this line is not evaluated when cleaning because scripts/Makefile.clean does not include include/config/auto.conf. Remove the ifneq-conditional, so this file is correctly cleaned up. 
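A note before the sign-offs: with AS_ADX and AS_AVX2 now ordinary Kconfig symbols (see the crypto/Kconfig hunks above), C code can test them with IS_ENABLED() as well as #ifdef; later commits in this series then delete those tests once the symbols become unconditional. A simplified user-space model of the IS_ENABLED() trick (the real macro in include/linux/kconfig.h additionally handles the =m/_MODULE case):

    #include <stdio.h>

    #define CONFIG_AS_AVX512 1      /* pretend Kconfig set this symbol */

    /* A symbol defined as 1 yields 1; an undefined one yields 0. */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_ENABLED(option) __is_defined(option)

    int main(void)
    {
            printf("AVX512: %d\n", IS_ENABLED(CONFIG_AS_AVX512));  /* 1 */
            printf("AVX2:   %d\n", IS_ENABLED(CONFIG_AS_AVX2));    /* 0 */
            return 0;
    }

The indirection through __is_defined() lets the symbol expand to its value before token pasting, so "defined as 1" selects the second argument and anything else falls through to 0. Unlike a plain #ifdef, an if (IS_ENABLED(...)) conditional keeps both branches visible to the compiler for type checking.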
Signed-off-by: Masahiro Yamada Acked-by: Herbert Xu Acked-by: Ingo Molnar --- arch/x86/crypto/Makefile | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 2f23f08fdd4b..d1ded16c1dcd 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -85,9 +85,7 @@ crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o -ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),) targets += poly1305-x86_64-cryptogams.S -endif obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o -- cgit v1.2.3 From e6abef610c7363cbd25205674b962031ef3bc790 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Thu, 26 Mar 2020 14:26:00 -0600 Subject: x86: update AS_* macros to binutils >=2.23, supporting ADX and AVX2 Now that the kernel specifies binutils 2.23 as the minimum version, we can remove ifdefs for AVX2 and ADX throughout. Signed-off-by: Jason A. Donenfeld Acked-by: Ingo Molnar Reviewed-by: Nick Desaulniers Signed-off-by: Masahiro Yamada --- arch/x86/Kconfig.assembler | 10 ---------- arch/x86/crypto/Makefile | 6 ++---- arch/x86/crypto/aesni-intel_avx-x86_64.S | 3 --- arch/x86/crypto/aesni-intel_glue.c | 7 ------- arch/x86/crypto/chacha_glue.c | 6 ++---- arch/x86/crypto/poly1305-x86_64-cryptogams.pl | 8 -------- arch/x86/crypto/poly1305_glue.c | 5 ++--- arch/x86/crypto/sha1_ssse3_glue.c | 6 ------ arch/x86/crypto/sha256-avx2-asm.S | 3 --- arch/x86/crypto/sha256_ssse3_glue.c | 6 ------ arch/x86/crypto/sha512-avx2-asm.S | 3 --- arch/x86/crypto/sha512_ssse3_glue.c | 5 ----- crypto/Kconfig | 8 ++++---- lib/raid6/algos.c | 6 ------ lib/raid6/avx2.c | 4 ---- lib/raid6/recov_avx2.c | 6 ------ lib/raid6/test/Makefile | 3 --- net/netfilter/Makefile | 2 +- net/netfilter/nf_tables_api.c | 2 +- net/netfilter/nft_set_pipapo.c | 2 +- net/netfilter/nft_set_pipapo_avx2.h | 4 ++-- 21 files changed, 15 insertions(+), 90 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig.assembler b/arch/x86/Kconfig.assembler index a5a1d2766b3a..13de0db38d4e 100644 --- a/arch/x86/Kconfig.assembler +++ b/arch/x86/Kconfig.assembler @@ -1,11 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # Copyright (C) 2020 Jason A. Donenfeld . All Rights Reserved. 
-config AS_AVX2 - def_bool $(as-instr,vpbroadcastb %xmm0$(comma)%ymm1) - help - Supported by binutils >= 2.22 and LLVM integrated assembler - config AS_AVX512 def_bool $(as-instr,vpmovm2b %k1$(comma)%zmm5) help @@ -20,8 +15,3 @@ config AS_SHA256_NI def_bool $(as-instr,sha256msg1 %xmm0$(comma)%xmm1) help Supported by binutils >= 2.24 and LLVM integrated assembler - -config AS_ADX - def_bool $(as-instr,adox %eax$(comma)%eax) - help - Supported by binutils >= 2.23 and LLVM integrated assembler diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index d1ded16c1dcd..a31de0c6ccde 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -47,8 +47,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o -chacha-x86_64-y := chacha-ssse3-x86_64.o chacha_glue.o -chacha-x86_64-$(CONFIG_AS_AVX2) += chacha-avx2-x86_64.o +chacha-x86_64-y := chacha-avx2-x86_64.o chacha-ssse3-x86_64.o chacha_glue.o chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o @@ -56,8 +55,7 @@ aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o -sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o -sha1-ssse3-$(CONFIG_AS_AVX2) += sha1_avx2_x86_64_asm.o +sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o sha1-ssse3-$(CONFIG_AS_SHA1_NI) += sha1_ni_asm.o obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S index cc56ee43238b..0cea33295287 100644 --- a/arch/x86/crypto/aesni-intel_avx-x86_64.S +++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S @@ -1868,7 +1868,6 @@ key_256_finalize: ret SYM_FUNC_END(aesni_gcm_finalize_avx_gen2) -#ifdef CONFIG_AS_AVX2 ############################################################################### # GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0) # Input: A and B (128-bits each, bit-reflected) @@ -2836,5 +2835,3 @@ key_256_finalize4: FUNC_RESTORE ret SYM_FUNC_END(aesni_gcm_finalize_avx_gen4) - -#endif /* CONFIG_AS_AVX2 */ diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 655ad6bc8810..ad8a7188a2bf 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -233,7 +233,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = { .finalize = &aesni_gcm_finalize_avx_gen2, }; -#ifdef CONFIG_AS_AVX2 /* * asmlinkage void aesni_gcm_init_avx_gen4() * gcm_data *my_ctx_data, context data @@ -276,8 +275,6 @@ static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = { .finalize = &aesni_gcm_finalize_avx_gen4, }; -#endif - static inline struct aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm) { @@ -706,10 +703,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req, if (!enc) left -= auth_tag_len; -#ifdef CONFIG_AS_AVX2 if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4) gcm_tfm = &aesni_gcm_tfm_avx_gen2; -#endif if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2) gcm_tfm = &aesni_gcm_tfm_sse; @@ -1069,12 +1064,10 @@ static int __init aesni_init(void) if (!x86_match_cpu(aesni_cpu_id)) return -ENODEV; #ifdef CONFIG_X86_64 -#ifdef CONFIG_AS_AVX2 if (boot_cpu_has(X86_FEATURE_AVX2)) { pr_info("AVX2 
version of gcm_enc/dec engaged.\n"); aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4; } else -#endif if (boot_cpu_has(X86_FEATURE_AVX)) { pr_info("AVX version of gcm_enc/dec engaged.\n"); aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2; diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c index 68a74953efaf..b412c21ee06e 100644 --- a/arch/x86/crypto/chacha_glue.c +++ b/arch/x86/crypto/chacha_glue.c @@ -79,8 +79,7 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src, } } - if (IS_ENABLED(CONFIG_AS_AVX2) && - static_branch_likely(&chacha_use_avx2)) { + if (static_branch_likely(&chacha_use_avx2)) { while (bytes >= CHACHA_BLOCK_SIZE * 8) { chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); bytes -= CHACHA_BLOCK_SIZE * 8; @@ -288,8 +287,7 @@ static int __init chacha_simd_mod_init(void) static_branch_enable(&chacha_use_simd); - if (IS_ENABLED(CONFIG_AS_AVX2) && - boot_cpu_has(X86_FEATURE_AVX) && + if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { static_branch_enable(&chacha_use_avx2); diff --git a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl index 5bac2d533104..137edcf038cb 100644 --- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl +++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl @@ -1514,10 +1514,6 @@ ___ if ($avx>1) { -if ($kernel) { - $code .= "#ifdef CONFIG_AS_AVX2\n"; -} - my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) = map("%ymm$_",(0..15)); my $S4=$MASK; @@ -2808,10 +2804,6 @@ ___ poly1305_blocks_avxN(0); &end_function("poly1305_blocks_avx2"); -if($kernel) { - $code .= "#endif\n"; -} - ####################################################################### if ($avx>2) { # On entry we have input length divisible by 64. 
But since inner loop diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c index 4a6226e1d15e..6dfec19f7d57 100644 --- a/arch/x86/crypto/poly1305_glue.c +++ b/arch/x86/crypto/poly1305_glue.c @@ -108,7 +108,7 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len, kernel_fpu_begin(); if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512)) poly1305_blocks_avx512(ctx, inp, bytes, padbit); - else if (IS_ENABLED(CONFIG_AS_AVX2) && static_branch_likely(&poly1305_use_avx2)) + else if (static_branch_likely(&poly1305_use_avx2)) poly1305_blocks_avx2(ctx, inp, bytes, padbit); else poly1305_blocks_avx(ctx, inp, bytes, padbit); @@ -264,8 +264,7 @@ static int __init poly1305_simd_mod_init(void) if (boot_cpu_has(X86_FEATURE_AVX) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) static_branch_enable(&poly1305_use_avx); - if (IS_ENABLED(CONFIG_AS_AVX2) && boot_cpu_has(X86_FEATURE_AVX) && - boot_cpu_has(X86_FEATURE_AVX2) && + if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_AVX2) && cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) static_branch_enable(&poly1305_use_avx2); if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) && diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c index 275b65dd30c9..a801ffc10cbb 100644 --- a/arch/x86/crypto/sha1_ssse3_glue.c +++ b/arch/x86/crypto/sha1_ssse3_glue.c @@ -174,7 +174,6 @@ static void unregister_sha1_avx(void) crypto_unregister_shash(&sha1_avx_alg); } -#if defined(CONFIG_AS_AVX2) #define SHA1_AVX2_BLOCK_OPTSIZE 4 /* optimal 4*64 bytes of SHA1 blocks */ asmlinkage void sha1_transform_avx2(struct sha1_state *state, @@ -246,11 +245,6 @@ static void unregister_sha1_avx2(void) crypto_unregister_shash(&sha1_avx2_alg); } -#else -static inline int register_sha1_avx2(void) { return 0; } -static inline void unregister_sha1_avx2(void) { } -#endif - #ifdef CONFIG_AS_SHA1_NI asmlinkage void sha1_ni_transform(struct sha1_state *digest, const u8 *data, int rounds); diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S index 499d9ec129de..11ff60c29c8b 100644 --- a/arch/x86/crypto/sha256-avx2-asm.S +++ b/arch/x86/crypto/sha256-avx2-asm.S @@ -48,7 +48,6 @@ # This code schedules 2 blocks at a time, with 4 lanes per block ######################################################################## -#ifdef CONFIG_AS_AVX2 #include ## assume buffers not aligned @@ -767,5 +766,3 @@ _SHUF_00BA: .align 32 _SHUF_DC00: .octa 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF - -#endif diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c index 8bdc3be31f64..6394b5fe8db6 100644 --- a/arch/x86/crypto/sha256_ssse3_glue.c +++ b/arch/x86/crypto/sha256_ssse3_glue.c @@ -220,7 +220,6 @@ static void unregister_sha256_avx(void) ARRAY_SIZE(sha256_avx_algs)); } -#if defined(CONFIG_AS_AVX2) asmlinkage void sha256_transform_rorx(struct sha256_state *state, const u8 *data, int blocks); @@ -295,11 +294,6 @@ static void unregister_sha256_avx2(void) ARRAY_SIZE(sha256_avx2_algs)); } -#else -static inline int register_sha256_avx2(void) { return 0; } -static inline void unregister_sha256_avx2(void) { } -#endif - #ifdef CONFIG_AS_SHA256_NI asmlinkage void sha256_ni_transform(struct sha256_state *digest, const u8 *data, int rounds); diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S index 3dd886b14e7d..3a44bdcfd583 100644 --- 
a/arch/x86/crypto/sha512-avx2-asm.S +++ b/arch/x86/crypto/sha512-avx2-asm.S @@ -49,7 +49,6 @@ # This code schedules 1 blocks at a time, with 4 lanes per block ######################################################################## -#ifdef CONFIG_AS_AVX2 #include .text @@ -749,5 +748,3 @@ PSHUFFLE_BYTE_FLIP_MASK: MASK_YMM_LO: .octa 0x00000000000000000000000000000000 .octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF - -#endif diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c index 75214982a633..82cc1b3ced1d 100644 --- a/arch/x86/crypto/sha512_ssse3_glue.c +++ b/arch/x86/crypto/sha512_ssse3_glue.c @@ -218,7 +218,6 @@ static void unregister_sha512_avx(void) ARRAY_SIZE(sha512_avx_algs)); } -#if defined(CONFIG_AS_AVX2) asmlinkage void sha512_transform_rorx(struct sha512_state *state, const u8 *data, int blocks); @@ -293,10 +292,6 @@ static void unregister_sha512_avx2(void) crypto_unregister_shashes(sha512_avx2_algs, ARRAY_SIZE(sha512_avx2_algs)); } -#else -static inline int register_sha512_avx2(void) { return 0; } -static inline void unregister_sha512_avx2(void) { } -#endif static int __init sha512_ssse3_mod_init(void) { diff --git a/crypto/Kconfig b/crypto/Kconfig index 49aae167e75c..c24a47406f8f 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -267,7 +267,7 @@ config CRYPTO_CURVE25519 config CRYPTO_CURVE25519_X86 tristate "x86_64 accelerated Curve25519 scalar multiplication library" - depends on X86 && 64BIT && AS_ADX + depends on X86 && 64BIT select CRYPTO_LIB_CURVE25519_GENERIC select CRYPTO_ARCH_HAVE_LIB_CURVE25519 @@ -465,7 +465,7 @@ config CRYPTO_NHPOLY1305_SSE2 config CRYPTO_NHPOLY1305_AVX2 tristate "NHPoly1305 hash function (x86_64 AVX2 implementation)" - depends on X86 && 64BIT && AS_AVX2 + depends on X86 && 64BIT select CRYPTO_NHPOLY1305 help AVX2 optimized implementation of the hash function used by the @@ -1303,7 +1303,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64 config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 tristate "Camellia cipher algorithm (x86_64/AES-NI/AVX2)" - depends on X86 && 64BIT && AS_AVX2 + depends on X86 && 64BIT depends on CRYPTO select CRYPTO_CAMELLIA_AESNI_AVX_X86_64 help @@ -1573,7 +1573,7 @@ config CRYPTO_SERPENT_AVX_X86_64 config CRYPTO_SERPENT_AVX2_X86_64 tristate "Serpent cipher algorithm (x86_64/AVX2)" - depends on X86 && 64BIT && AS_AVX2 + depends on X86 && 64BIT select CRYPTO_SERPENT_AVX_X86_64 help Serpent cipher algorithm, by Anderson, Biham & Knudsen. 
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index df08664d3432..6d5e5000fdd7 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -34,10 +34,8 @@ const struct raid6_calls * const raid6_algos[] = { &raid6_avx512x2, &raid6_avx512x1, #endif -#ifdef CONFIG_AS_AVX2 &raid6_avx2x2, &raid6_avx2x1, -#endif &raid6_sse2x2, &raid6_sse2x1, &raid6_sse1x2, @@ -51,11 +49,9 @@ const struct raid6_calls * const raid6_algos[] = { &raid6_avx512x2, &raid6_avx512x1, #endif -#ifdef CONFIG_AS_AVX2 &raid6_avx2x4, &raid6_avx2x2, &raid6_avx2x1, -#endif &raid6_sse2x4, &raid6_sse2x2, &raid6_sse2x1, @@ -101,9 +97,7 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = { #ifdef CONFIG_AS_AVX512 &raid6_recov_avx512, #endif -#ifdef CONFIG_AS_AVX2 &raid6_recov_avx2, -#endif &raid6_recov_ssse3, #endif #ifdef CONFIG_S390 diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c index 87184b6da28a..f299476e1d76 100644 --- a/lib/raid6/avx2.c +++ b/lib/raid6/avx2.c @@ -13,8 +13,6 @@ * */ -#ifdef CONFIG_AS_AVX2 - #include #include "x86.h" @@ -470,5 +468,3 @@ const struct raid6_calls raid6_avx2x4 = { 1 /* Has cache hints */ }; #endif - -#endif /* CONFIG_AS_AVX2 */ diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c index 7a3b5e7f66ee..4e8095403ee2 100644 --- a/lib/raid6/recov_avx2.c +++ b/lib/raid6/recov_avx2.c @@ -4,8 +4,6 @@ * Author: Jim Kukunas */ -#ifdef CONFIG_AS_AVX2 - #include #include "x86.h" @@ -313,7 +311,3 @@ const struct raid6_recov_calls raid6_recov_avx2 = { #endif .priority = 2, }; - -#else -#warning "your version of binutils lacks AVX2 support" -#endif diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index 60021319ac78..a4c7cd74cff5 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -35,9 +35,6 @@ endif ifeq ($(IS_X86),yes) OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o CFLAGS += -DCONFIG_X86 - CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \ - gcc -c -x assembler - >/dev/null 2>&1 && \ - rm ./-.o && echo -DCONFIG_AS_AVX2=1) CFLAGS += $(shell echo "vpmovm2b %k1, %zmm5" | \ gcc -c -x assembler - >/dev/null 2>&1 && \ rm ./-.o && echo -DCONFIG_AS_AVX512=1) diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index ed4e52270520..0e0ded87e27b 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -83,7 +83,7 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \ nft_set_pipapo.o ifdef CONFIG_X86_64 -ifdef CONFIG_AS_AVX2 +ifndef CONFIG_UML nf_tables-objs += nft_set_pipapo_avx2.o endif endif diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index d0ab5ffa1e2c..4471393da6d8 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3291,7 +3291,7 @@ static const struct nft_set_type *nft_set_types[] = { &nft_set_rhash_type, &nft_set_bitmap_type, &nft_set_rbtree_type, -#if defined(CONFIG_X86_64) && defined(CONFIG_AS_AVX2) +#if defined(CONFIG_X86_64) && !defined(CONFIG_UML) &nft_set_pipapo_avx2_type, #endif &nft_set_pipapo_type, diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 87aabf651cfe..8b5acc6910fd 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -2201,7 +2201,7 @@ const struct nft_set_type nft_set_pipapo_type = { }, }; -#if defined(CONFIG_X86_64) && defined(CONFIG_AS_AVX2) +#if defined(CONFIG_X86_64) && !defined(CONFIG_UML) const struct nft_set_type nft_set_pipapo_avx2_type = { .features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT, diff --git 
a/net/netfilter/nft_set_pipapo_avx2.h b/net/netfilter/nft_set_pipapo_avx2.h index 396caf7bfca8..394bcb704db7 100644 --- a/net/netfilter/nft_set_pipapo_avx2.h +++ b/net/netfilter/nft_set_pipapo_avx2.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _NFT_SET_PIPAPO_AVX2_H -#ifdef CONFIG_AS_AVX2 +#if defined(CONFIG_X86_64) && !defined(CONFIG_UML) #include #define NFT_PIPAPO_ALIGN (XSAVE_YMM_SIZE / BITS_PER_BYTE) @@ -9,6 +9,6 @@ bool nft_pipapo_avx2_lookup(const struct net *net, const struct nft_set *set, const u32 *key, const struct nft_set_ext **ext); bool nft_pipapo_avx2_estimate(const struct nft_set_desc *desc, u32 features, struct nft_set_estimate *est); -#endif /* CONFIG_AS_AVX2 */ +#endif /* defined(CONFIG_X86_64) && !defined(CONFIG_UML) */ #endif /* _NFT_SET_PIPAPO_AVX2_H */ -- cgit v1.2.3 From 2a5e5d0c966945f53e50f671babb50a4c201eabd Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 6 Apr 2020 01:30:52 +0900 Subject: MIPS: fw: arc: add __weak to prom_meminit and prom_free_prom_memory As far as I understand, prom_meminit() in arch/mips/fw/arc/memory.c is overridden by the one in arch/mips/sgi-ip32/ip32-memory.c if CONFIG_SGI_IP32 is enabled. The use of EXPORT_SYMBOL in static libraries potentially causes a problem for the LLVM linker [1]. So, I want to forcibly link lib-y objects to vmlinux when CONFIG_MODULES=y. As groundwork, we must fix multiple definitions that were previously hidden by lib-y. The prom_cleanup() in this file is already marked as __weak (because it is overridden by the one in arch/mips/sgi-ip22/ip22-mc.c). I think it should be OK to do the same for these two. [1]: https://github.com/ClangBuiltLinux/linux/issues/515 Reported-by: kbuild test robot Signed-off-by: Masahiro Yamada Acked-by: Thomas Bogendoerfer --- arch/mips/fw/arc/memory.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c index dbbcddc82823..89fa6e62a3b3 100644 --- a/arch/mips/fw/arc/memory.c +++ b/arch/mips/fw/arc/memory.c @@ -117,7 +117,7 @@ static int __init prom_memtype_classify(union linux_memtypes type) return memtype_classify_arc(type); } -void __init prom_meminit(void) +void __weak __init prom_meminit(void) { struct linux_mdesc *p; @@ -162,7 +162,7 @@ void __weak __init prom_cleanup(void) { } -void __init prom_free_prom_memory(void) +void __weak __init prom_free_prom_memory(void) { int i; -- cgit v1.2.3
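A note on the __weak mechanism the patch above relies on: a weak definition is kept by the linker only when no strong definition of the same symbol is linked in, so the generic ARC implementation and a platform override can both be built without a multiple-definition error. A minimal userspace sketch of the idea (hypothetical file names; the kernel spells the attribute via its __weak macro, and the stand-in body below is not the real firmware code):

/* weak_demo.c — built alone, the weak default runs; linked together with
 * a strong prom_meminit() from another object, the strong symbol wins. */
#include <stdio.h>

__attribute__((weak)) void prom_meminit(void)
{
	printf("generic ARC prom_meminit\n");	/* default implementation */
}

int main(void)
{
	prom_meminit();		/* resolves to the strong symbol if one exists */
	return 0;
}

/* Hypothetical override.c, standing in for arch/mips/sgi-ip32/ip32-memory.c:
 *	void prom_meminit(void) { printf("IP32 prom_meminit\n"); }
 * cc weak_demo.c            -> prints "generic ARC prom_meminit"
 * cc weak_demo.c override.c -> prints "IP32 prom_meminit"
 */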
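Similarly, the CONFIG_AS_AVX2 removals earlier in the series (chacha_glue.c, poly1305_glue.c) work because Kconfig-derived conditions are compile-time constants: an if (IS_ENABLED(...)) guard keeps the guarded call visible to the compiler, so it is type-checked, yet the dead branch is discarded when the option is off. Once binutils >= 2.23 guarantees AVX2 support, the condition is always true and the guard can simply be dropped. A simplified standalone sketch of the idiom; this is not the kernel's actual IS_ENABLED() implementation (which is more elaborate and also handles =m options), and the function names are stand-ins borrowed from the diff:

/* is_enabled_demo.c — constant-condition dispatch instead of #ifdef. */
#include <stdio.h>

#define CONFIG_AS_AVX2 1	/* pretend Kconfig probed the assembler */

#if defined(CONFIG_AS_AVX2)
#define IS_ENABLED_AS_AVX2 1
#else
#define IS_ENABLED_AS_AVX2 0
#endif

static void chacha_avx2(void)  { printf("AVX2 path\n"); }
static void chacha_ssse3(void) { printf("SSSE3 path\n"); }

int main(void)
{
	/* Both branches compile; the dead one is eliminated. With the
	 * guard guaranteed true, it adds nothing and can be removed. */
	if (IS_ENABLED_AS_AVX2)
		chacha_avx2();
	else
		chacha_ssse3();
	return 0;
}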