From: Sasha Levin
Date: Tue, 20 Mar 2018 20:44:18 +0000 (-0400)
Subject: Revert "x86/retpoline/crypto: Convert crypto assembler indirect jumps"
X-Git-Tag: v4.1.51~2
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=2ae2efda4b14ad93415c2b9884cbac1ac9d0d794;p=thirdparty%2Fkernel%2Fstable.git

Revert "x86/retpoline/crypto: Convert crypto assembler indirect jumps"

This reverts commit 0153127f56d685b355e5adb5747f1d4463761756.

Signed-off-by: Sasha Levin
---

diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 3f93dedb5a4dc..6bd2c6c95373f 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -31,7 +31,6 @@
 
 #include <linux/linkage.h>
 #include <asm/inst.h>
-#include <asm/nospec-branch.h>
 
 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2715,7 +2714,7 @@ ENTRY(aesni_xts_crypt8)
 	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
-	CALL_NOSPEC %r11
+	call *%r11
 
 	movdqu 0x00(OUTP), INC
 	pxor INC, STATE1
@@ -2760,7 +2759,7 @@ ENTRY(aesni_xts_crypt8)
 	_aesni_gf128mul_x_ble()
 	movups IV, (IVP)
 
-	CALL_NOSPEC %r11
+	call *%r11
 
 	movdqu 0x40(OUTP), INC
 	pxor INC, STATE1
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index 5881756f78a21..ce71f9212409f 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -16,7 +16,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1211,7 +1210,7 @@ camellia_xts_crypt_16way:
 	vpxor 14 * 16(%rax), %xmm15, %xmm14;
 	vpxor 15 * 16(%rax), %xmm15, %xmm15;
 
-	CALL_NOSPEC %r9;
+	call *%r9;
 
 	addq $(16 * 16), %rsp;
 
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 0d45b04b490a8..0e0b8863a34bd 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -11,7 +11,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1324,7 +1323,7 @@ camellia_xts_crypt_32way:
 	vpxor 14 * 32(%rax), %ymm15, %ymm14;
 	vpxor 15 * 32(%rax), %ymm15, %ymm15;
 
-	CALL_NOSPEC %r9;
+	call *%r9;
 
 	addq $(16 * 32), %rsp;
 
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 5fc80c880a164..225be06edc809 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,7 +45,6 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
-#include <asm/nospec-branch.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -173,7 +172,7 @@ continue_block:
 	movzxw  (bufp, %rax, 2), len
 	offset=crc_array-jump_table
 	lea     offset(bufp, len, 1), bufp
-	JMP_NOSPEC bufp
+	jmp     *bufp
 
 	################################################################
 	## 2a) PROCESS FULL BLOCKS:
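
Background note (not part of the patch): CALL_NOSPEC and JMP_NOSPEC are macros from asm/nospec-branch.h, whose include this revert also drops. With CONFIG_RETPOLINE enabled they replace a plain indirect branch with a retpoline thunk, so the target cannot be reached through the indirect branch predictor; otherwise they fall back to the ordinary "call *reg" / "jmp *reg" that this revert restores unconditionally. Below is a minimal sketch of the retpoline pattern that "CALL_NOSPEC %r11" stands for when retpolines are active; the local label names are illustrative and are not the exact macro expansion from nospec-branch.h:

	/* sketch: retpolined equivalent of "call *%r11" (labels illustrative) */
	jmp	.Ldo_call		/* skip over the thunk body */
.Lretpoline_thunk:
	call	.Ldo_rop		/* push a return address for the RSB */
.Lspec_trap:
	pause				/* speculative execution is captured */
	lfence				/* here instead of at *%r11 */
	jmp	.Lspec_trap
.Ldo_rop:
	mov	%r11, (%rsp)		/* overwrite return address with the real target */
	ret				/* architecturally "return" to *%r11 */
.Ldo_call:
	call	.Lretpoline_thunk	/* non-speculative path ends up at *%r11 */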