    Subject: [tip:x86/pti] x86/retpoline/crypto: Convert crypto assembler indirect jumps
    Commit-ID:  b86d748af60a52fe5b2bfb28f4451f79e28d0b32
    Gitweb: https://git.kernel.org/tip/b86d748af60a52fe5b2bfb28f4451f79e28d0b32
    Author: David Woodhouse <dwmw@amazon.co.uk>
    AuthorDate: Tue, 9 Jan 2018 14:43:10 +0000
    Committer: Thomas Gleixner <tglx@linutronix.de>
    CommitDate: Wed, 10 Jan 2018 19:09:10 +0100

    x86/retpoline/crypto: Convert crypto assembler indirect jumps

    Convert all indirect jumps in crypto assembler code to use non-speculative
    sequences when CONFIG_RETPOLINE is enabled.
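    To make the conversion concrete: a retpoline traps any speculative
    execution of an indirect branch in a harmless loop, so the branch
    target can no longer be steered through the indirect branch
    predictor. A minimal sketch of the sequence that the CALL_NOSPEC /
    JMP_NOSPEC macros from <asm/nospec-branch.h> boil down to for a
    branch through %r11 (simplified; the label names are illustrative):

        call    .Ldo_rop        /* push address of the mov/ret pair below */
    .Lspec_trap:
        pause                   /* speculative execution of the ret below */
        lfence                  /* lands here and spins, going nowhere */
        jmp     .Lspec_trap
    .Ldo_rop:
        mov     %r11, (%rsp)    /* replace return address with real target */
        ret                     /* "return" to *%r11, bypassing prediction */

    CALL_NOSPEC wraps this same construct in an outer call so execution
    resumes after the call site; with CONFIG_RETPOLINE disabled the
    macros emit a plain indirect call/jmp instead.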

    Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Acked-by: Arjan van de Ven <arjan@linux.intel.com>
    Acked-by: Ingo Molnar <mingo@kernel.org>
    Cc: gnomes@lxorguk.ukuu.org.uk
    Cc: Rik van Riel <riel@redhat.com>
    Cc: Andi Kleen <ak@linux.intel.com>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Jiri Kosina <jikos@kernel.org>
    Cc: Andy Lutomirski <luto@amacapital.net>
    Cc: Dave Hansen <dave.hansen@intel.com>
    Cc: Kees Cook <keescook@google.com>
    Cc: Tim Chen <tim.c.chen@linux.intel.com>
    Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
    Cc: Paul Turner <pjt@google.com>
    Link: https://lkml.kernel.org/r/1515508997-6154-5-git-send-email-dwmw@amazon.co.uk


    ---
 arch/x86/crypto/aesni-intel_asm.S            | 5 +++--
 arch/x86/crypto/camellia-aesni-avx-asm_64.S  | 3 ++-
 arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 3 ++-
 arch/x86/crypto/crc32c-pcl-intel-asm_64.S    | 3 ++-
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 16627fe..3d09e3a 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -32,6 +32,7 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 /*
  * The following macros are used to move an (un)aligned 16 byte value to/from
@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8)
 	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
-	call *%r11
+	CALL_NOSPEC %r11
 
 	movdqu 0x00(OUTP), INC
 	pxor INC, STATE1
@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8)
 	_aesni_gf128mul_x_ble()
 	movups IV, (IVP)
 
-	call *%r11
+	CALL_NOSPEC %r11
 
 	movdqu 0x40(OUTP), INC
 	pxor INC, STATE1
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e..a14af6e 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way:
 	vpxor 14 * 16(%rax), %xmm15, %xmm14;
 	vpxor 15 * 16(%rax), %xmm15, %xmm15;
 
-	call *%r9;
+	CALL_NOSPEC %r9;
 
 	addq $(16 * 16), %rsp;
 
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index eee5b39..b66bbfa 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <asm/frame.h>
+#include <asm/nospec-branch.h>
 
 #define CAMELLIA_TABLE_BYTE_LEN 272
 
@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way:
 	vpxor 14 * 32(%rax), %ymm15, %ymm14;
 	vpxor 15 * 32(%rax), %ymm15, %ymm15;
 
-	call *%r9;
+	CALL_NOSPEC %r9;
 
 	addq $(16 * 32), %rsp;
 
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 7a7de27..d9b734d 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -45,6 +45,7 @@
 
 #include <asm/inst.h>
 #include <linux/linkage.h>
+#include <asm/nospec-branch.h>
 
 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
 
@@ -172,7 +173,7 @@ continue_block:
 	movzxw	(bufp, %rax, 2), len
 	lea	crc_array(%rip), bufp
 	lea	(bufp, len, 1), bufp
-	jmp	*bufp
+	JMP_NOSPEC bufp
 
 	################################################################
 	## 2a) PROCESS FULL BLOCKS: