From: Eric Biggers <ebiggers@google.com>
Subject: [RFC/RFT PATCH 03/15] crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP

The x86 AEGIS implementations all fail the improved AEAD tests because
they produce the wrong result with some data layouts.  Also, when the
MAY_SLEEP flag is given, they can sleep in the skcipher_walk_*()
functions while preemption is disabled by kernel_fpu_begin().

Fix these bugs.

Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/x86/crypto/aegis128-aesni-glue.c  | 38 ++++++++++----------------
 arch/x86/crypto/aegis128l-aesni-glue.c | 38 ++++++++++----------------
 arch/x86/crypto/aegis256-aesni-glue.c  | 38 ++++++++++----------------
 3 files changed, 45 insertions(+), 69 deletions(-)
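
(Review note, not part of the patch: below is a minimal userspace sketch of
the block/tail split that the new processing loops implement.  The chunk
sizes, BLOCK_SIZE value, and the crypt_blocks()/crypt_tail() stubs are all
hypothetical stand-ins.  The point is that with a chunked walk, whole blocks
go to the bulk routine and the sub-block residue is carried forward, so the
tail routine runs at most once, at the very end; the real code gets that
behaviour from skcipher_walk_done(), which hands unprocessed bytes back to
the walk.  The MAY_SLEEP problem is handled separately in the second hunk of
each file, by doing the possibly-sleeping skcipher_walk_init() before
kernel_fpu_begin() and making the walk atomic.)

#include <stdio.h>

#define BLOCK_SIZE 16	/* stand-in for AEGIS128_BLOCK_SIZE */

/* Stubs standing in for ops->crypt_blocks() / ops->crypt_tail(). */
static void crypt_blocks(unsigned int len) { printf("crypt_blocks: %u bytes\n", len); }
static void crypt_tail(unsigned int len)   { printf("crypt_tail:   %u bytes\n", len); }

int main(void)
{
	/* Hypothetical chunk sizes, as a scatterlist walk might yield them. */
	unsigned int chunks[] = { 20, 7, 33, 5 };
	unsigned int nbytes = 0;
	unsigned int i;

	for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		nbytes += chunks[i];	/* residue carried into this step */

		if (nbytes >= BLOCK_SIZE) {
			/* Only whole blocks reach the bulk routine. */
			crypt_blocks(nbytes - nbytes % BLOCK_SIZE);
			/* Keep the remainder, like skcipher_walk_done(walk,
			 * walk->nbytes % BLOCK_SIZE) does in the patch. */
			nbytes %= BLOCK_SIZE;
		}
	}

	/* The sub-block tail is processed exactly once, at the end. */
	if (nbytes)
		crypt_tail(nbytes);
	return 0;
}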

diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 2a356b948720..3ea71b871813 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
 }
 
 static void crypto_aegis128_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
 	crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index dbe8bb980da1..1b1b39c66c5e 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
 }
 
 static void crypto_aegis128l_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128L_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
+		ops->crypt_blocks(state, round_down(walk->nbytes,
+						    AEGIS128L_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
 	crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128l_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 8bebda2de92f..6227ca3220a0 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
 }
 
 static void crypto_aegis256_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS256_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
 	crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis256_aesni_process_crypt(&state, req, ops);
+	crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
-- 
2.20.1.321.g9e740568ce-goog