Date: Mon, 23 Nov 2020
From: dann frazier
Subject: Re: [PATCH 4.4 17/70] crypto: arm64/sha - avoid non-standard inline asm tricks
On Mon, Nov 26, 2018 at 11:50:32AM +0100, Greg Kroah-Hartman wrote:
> 4.4-stable review patch. If anyone has any objections, please let me know.

FYI, I bisected a regression down to this commit. It apparently causes
an ADR_PREL_PG_HI21 relocation to be emitted in the sha{1,2}_ce
modules. Back in 4.4 the module loader rejected ADR_PREL_PG_HI21
relocations when the kernel was built with
CONFIG_ARM64_ERRATUM_843419=y, so the sha{1,2}_ce modules now fail to
load:

[ 37.866250] module sha1_ce: unsupported RELA relocation: 275

Looks like it should be an issue for 4.14.y as well, but I haven't yet
tested it.
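
For reference, the rejection comes from the arm64 module loader. In the
4.4 tree, the R_AARCH64_ADR_PREL_PG_HI21 cases (ELF relocation type 275)
are compiled out entirely when CONFIG_ARM64_ERRATUM_843419=y, since
modules are then built with -mcmodel=large and the compiler should never
emit adrp; the relocation instead falls through to the default error
path. Hand-written assembly isn't covered by -mcmodel=large, which is
how these modules slip through. Roughly, quoting v4.4
arch/arm64/kernel/module.c from memory (treat this as a sketch rather
than the exact source):

	/* inside apply_relocate_add(), v4.4 (approximate) */
#ifndef CONFIG_ARM64_ERRATUM_843419
	case R_AARCH64_ADR_PREL_PG_HI21_NC:
		overflow_check = false;
	case R_AARCH64_ADR_PREL_PG_HI21:
		ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
				     AARCH64_INSN_IMM_ADR);
		break;
#endif
	/* ... other relocation cases ... */
	default:
		pr_err("module %s: unsupported RELA relocation: %llu\n",
		       me->name, ELF64_R_TYPE(rel[i].r_info));
		return -ENOEXEC;

That default case is what prints the "unsupported RELA relocation: 275"
error above.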

-dann

> From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
>
> commit f4857f4c2ee9aa4e2aacac1a845352b00197fb57 upstream.
>
> Replace the inline asm which exports struct offsets as ELF symbols
> with proper const variables exposing the same values. This works
> around an issue with Clang which does not interpret the "i" (or "I")
> constraints in the same way as GCC.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Tested-by: Matthias Kaehlcke <mka@chromium.org>
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
> Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
> ---
> arch/arm64/crypto/sha1-ce-core.S | 6 ++++--
> arch/arm64/crypto/sha1-ce-glue.c | 11 +++--------
> arch/arm64/crypto/sha2-ce-core.S | 6 ++++--
> arch/arm64/crypto/sha2-ce-glue.c | 13 +++++--------
> 4 files changed, 16 insertions(+), 20 deletions(-)
>
> --- a/arch/arm64/crypto/sha1-ce-core.S
> +++ b/arch/arm64/crypto/sha1-ce-core.S
> @@ -82,7 +82,8 @@ ENTRY(sha1_ce_transform)
> ldr dgb, [x0, #16]
>
> /* load sha1_ce_state::finalize */
> - ldr w4, [x0, #:lo12:sha1_ce_offsetof_finalize]
> + ldr_l w4, sha1_ce_offsetof_finalize, x4
> + ldr w4, [x0, x4]
>
> /* load input */
> 0: ld1 {v8.4s-v11.4s}, [x1], #64
> @@ -132,7 +133,8 @@ CPU_LE( rev32 v11.16b, v11.16b )
> * the padding is handled by the C code in that case.
> */
> cbz x4, 3f
> - ldr x4, [x0, #:lo12:sha1_ce_offsetof_count]
> + ldr_l w4, sha1_ce_offsetof_count, x4
> + ldr x4, [x0, x4]
> movi v9.2d, #0
> mov x8, #0x80000000
> movi v10.2d, #0
> --- a/arch/arm64/crypto/sha1-ce-glue.c
> +++ b/arch/arm64/crypto/sha1-ce-glue.c
> @@ -17,9 +17,6 @@
> #include <linux/crypto.h>
> #include <linux/module.h>
>
> -#define ASM_EXPORT(sym, val) \
> - asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
> -
> MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
> MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
> MODULE_LICENSE("GPL v2");
> @@ -32,6 +29,9 @@ struct sha1_ce_state {
> asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
> int blocks);
>
> +const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
> +const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize);
> +
> static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
> unsigned int len)
> {
> @@ -52,11 +52,6 @@ static int sha1_ce_finup(struct shash_de
> struct sha1_ce_state *sctx = shash_desc_ctx(desc);
> bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
>
> - ASM_EXPORT(sha1_ce_offsetof_count,
> - offsetof(struct sha1_ce_state, sst.count));
> - ASM_EXPORT(sha1_ce_offsetof_finalize,
> - offsetof(struct sha1_ce_state, finalize));
> -
> /*
> * Allow the asm code to perform the finalization if there is no
> * partial data and the input is a round multiple of the block size.
> --- a/arch/arm64/crypto/sha2-ce-core.S
> +++ b/arch/arm64/crypto/sha2-ce-core.S
> @@ -88,7 +88,8 @@ ENTRY(sha2_ce_transform)
> ld1 {dgav.4s, dgbv.4s}, [x0]
>
> /* load sha256_ce_state::finalize */
> - ldr w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
> + ldr_l w4, sha256_ce_offsetof_finalize, x4
> + ldr w4, [x0, x4]
>
> /* load input */
> 0: ld1 {v16.4s-v19.4s}, [x1], #64
> @@ -136,7 +137,8 @@ CPU_LE( rev32 v19.16b, v19.16b )
> * the padding is handled by the C code in that case.
> */
> cbz x4, 3f
> - ldr x4, [x0, #:lo12:sha256_ce_offsetof_count]
> + ldr_l w4, sha256_ce_offsetof_count, x4
> + ldr x4, [x0, x4]
> movi v17.2d, #0
> mov x8, #0x80000000
> movi v18.2d, #0
> --- a/arch/arm64/crypto/sha2-ce-glue.c
> +++ b/arch/arm64/crypto/sha2-ce-glue.c
> @@ -17,9 +17,6 @@
> #include <linux/crypto.h>
> #include <linux/module.h>
>
> -#define ASM_EXPORT(sym, val) \
> - asm(".globl " #sym "; .set " #sym ", %0" :: "I"(val));
> -
> MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
> MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
> MODULE_LICENSE("GPL v2");
> @@ -32,6 +29,11 @@ struct sha256_ce_state {
> asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
> int blocks);
>
> +const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
> + sst.count);
> +const u32 sha256_ce_offsetof_finalize = offsetof(struct sha256_ce_state,
> + finalize);
> +
> static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
> unsigned int len)
> {
> @@ -52,11 +54,6 @@ static int sha256_ce_finup(struct shash_
> struct sha256_ce_state *sctx = shash_desc_ctx(desc);
> bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
>
> - ASM_EXPORT(sha256_ce_offsetof_count,
> - offsetof(struct sha256_ce_state, sst.count));
> - ASM_EXPORT(sha256_ce_offsetof_finalize,
> - offsetof(struct sha256_ce_state, finalize));
> -
> /*
> * Allow the asm code to perform the finalization if there is no
> * partial data and the input is a round multiple of the block size.
>
>
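
For anyone else chasing this: the new relocation comes from the ldr_l
macro used above. ldr_l (arch/arm64/include/asm/assembler.h) loads a
value through a PC-relative adrp/ldr pair, and it is the adrp that
requires an R_AARCH64_ADR_PREL_PG_HI21 relocation once the code is built
as a module. A sketch of the macro as I recall it (not necessarily the
exact text in the tree):

	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym			// R_AARCH64_ADR_PREL_PG_HI21
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym			// R_AARCH64_ADR_PREL_PG_HI21
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

So "ldr_l w4, sha1_ce_offsetof_finalize, x4" assembles to an adrp of x4
followed by a :lo12: load from that page. The pre-patch code referenced
the offsets as absolute symbols (defined by the ASM_EXPORT inline asm
this patch removes), which needs no adrp and so relocated fine even with
the erratum workaround enabled.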
