Diffstat (limited to 'arch/x86/crypto/crc32-pclmul_asm.S')
-rw-r--r--  arch/x86/crypto/crc32-pclmul_asm.S | 47 +++++++++++++++++++++++------------------------
1 file changed, 23 insertions(+), 24 deletions(-)
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index 9fd28ff65bc2..6e7d4c4d3208 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -38,7 +38,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/inst.h>
 
 
 .section .rodata
@@ -129,17 +128,17 @@ loop_64:/* 64 bytes Full cache line folding */
 #ifdef __x86_64__
 	movdqa %xmm4, %xmm8
 #endif
-	PCLMULQDQ 00, CONSTANT, %xmm1
-	PCLMULQDQ 00, CONSTANT, %xmm2
-	PCLMULQDQ 00, CONSTANT, %xmm3
+	pclmulqdq $0x00, CONSTANT, %xmm1
+	pclmulqdq $0x00, CONSTANT, %xmm2
+	pclmulqdq $0x00, CONSTANT, %xmm3
 #ifdef __x86_64__
-	PCLMULQDQ 00, CONSTANT, %xmm4
+	pclmulqdq $0x00, CONSTANT, %xmm4
 #endif
-	PCLMULQDQ 0x11, CONSTANT, %xmm5
-	PCLMULQDQ 0x11, CONSTANT, %xmm6
-	PCLMULQDQ 0x11, CONSTANT, %xmm7
+	pclmulqdq $0x11, CONSTANT, %xmm5
+	pclmulqdq $0x11, CONSTANT, %xmm6
+	pclmulqdq $0x11, CONSTANT, %xmm7
 #ifdef __x86_64__
-	PCLMULQDQ 0x11, CONSTANT, %xmm8
+	pclmulqdq $0x11, CONSTANT, %xmm8
 #endif
 	pxor %xmm5, %xmm1
 	pxor %xmm6, %xmm2
@@ -149,8 +148,8 @@ loop_64:/* 64 bytes Full cache line folding */
 #else
 	/* xmm8 unsupported for x32 */
 	movdqa %xmm4, %xmm5
-	PCLMULQDQ 00, CONSTANT, %xmm4
-	PCLMULQDQ 0x11, CONSTANT, %xmm5
+	pclmulqdq $0x00, CONSTANT, %xmm4
+	pclmulqdq $0x11, CONSTANT, %xmm5
 	pxor %xmm5, %xmm4
 #endif
 
@@ -172,20 +171,20 @@ less_64:/* Folding cache line into 128bit */
 	prefetchnta (BUF)
 
 	movdqa %xmm1, %xmm5
-	PCLMULQDQ 0x00, CONSTANT, %xmm1
-	PCLMULQDQ 0x11, CONSTANT, %xmm5
+	pclmulqdq $0x00, CONSTANT, %xmm1
+	pclmulqdq $0x11, CONSTANT, %xmm5
 	pxor %xmm5, %xmm1
 	pxor %xmm2, %xmm1
 
 	movdqa %xmm1, %xmm5
-	PCLMULQDQ 0x00, CONSTANT, %xmm1
-	PCLMULQDQ 0x11, CONSTANT, %xmm5
+	pclmulqdq $0x00, CONSTANT, %xmm1
+	pclmulqdq $0x11, CONSTANT, %xmm5
 	pxor %xmm5, %xmm1
 	pxor %xmm3, %xmm1
 
 	movdqa %xmm1, %xmm5
-	PCLMULQDQ 0x00, CONSTANT, %xmm1
-	PCLMULQDQ 0x11, CONSTANT, %xmm5
+	pclmulqdq $0x00, CONSTANT, %xmm1
+	pclmulqdq $0x11, CONSTANT, %xmm5
 	pxor %xmm5, %xmm1
 	pxor %xmm4, %xmm1
 
@@ -193,8 +192,8 @@ less_64:/* Folding cache line into 128bit */
 	jb fold_64
 loop_16:/* Folding rest buffer into 128bit */
 	movdqa %xmm1, %xmm5
-	PCLMULQDQ 0x00, CONSTANT, %xmm1
-	PCLMULQDQ 0x11, CONSTANT, %xmm5
+	pclmulqdq $0x00, CONSTANT, %xmm1
+	pclmulqdq $0x11, CONSTANT, %xmm5
 	pxor %xmm5, %xmm1
 	pxor (BUF), %xmm1
 	sub $0x10, LEN
@@ -205,7 +204,7 @@ loop_16:/* Folding rest buffer into 128bit */
 fold_64:
 	/* perform the last 64 bit fold, also adds 32 zeroes
 	 * to the input stream */
-	PCLMULQDQ 0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
+	pclmulqdq $0x01, %xmm1, CONSTANT /* R4 * xmm1.low */
 	psrldq $0x08, %xmm1
 	pxor CONSTANT, %xmm1
 
@@ -220,7 +219,7 @@ fold_64:
 #endif
 	psrldq $0x04, %xmm2
 	pand %xmm3, %xmm1
-	PCLMULQDQ 0x00, CONSTANT, %xmm1
+	pclmulqdq $0x00, CONSTANT, %xmm1
 	pxor %xmm2, %xmm1
 
 	/* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
@@ -231,11 +230,11 @@ fold_64:
 #endif
 	movdqa %xmm1, %xmm2
 	pand %xmm3, %xmm1
-	PCLMULQDQ 0x10, CONSTANT, %xmm1
+	pclmulqdq $0x10, CONSTANT, %xmm1
 	pand %xmm3, %xmm1
-	PCLMULQDQ 0x00, CONSTANT, %xmm1
+	pclmulqdq $0x00, CONSTANT, %xmm1
 	pxor %xmm2, %xmm1
-	PEXTRD 0x01, %xmm1, %eax
+	pextrd $0x01, %xmm1, %eax
 
 	ret
 SYM_FUNC_END(crc32_pclmul_le_16)
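
The change is purely mechanical: every `PCLMULQDQ`/`PEXTRD` macro invocation from <asm/inst.h> (which hand-emitted the opcode bytes for assemblers that predated these mnemonics) becomes the native mnemonic, with the `$` prefix that GAS requires on immediate operands. As a minimal sketch of the equivalence, assuming the documented SSE encoding (66 0F 3A 44 /r ib) rather than the actual macro body from <asm/inst.h>:

	/*
	 * Illustration only -- both lines assemble to the same six bytes:
	 * 66 0f 3a 44 c8 00.  The immediate selects which 64-bit halves
	 * are carry-less multiplied: bit 0 picks the half of the
	 * destination, bit 4 the half of the source, so $0x00 is
	 * low*low and $0x11 is high*high.
	 */
	.byte 0x66, 0x0f, 0x3a, 0x44, 0xc8, 0x00	/* hand-encoded */
	pclmulqdq $0x00, %xmm0, %xmm1			/* native mnemonic */

With the macros gone, nothing in this file uses <asm/inst.h> anymore, which is why the #include is dropped in the first hunk.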