From: Ard Biesheuvel <ardb@kernel.org>
Subject: [PATCH 05/28] efi/arm64: clean EFI stub exit code from cache instead of avoiding it
Date: Sun, 8 Mar 2020
Commit 9f9223778 ("efi/libstub/arm: Make efi_entry() an ordinary PE/COFF
entrypoint") modified the handover code written in assembler, and for
maintainability, aligned its logic with that of the 32-bit ARM version:
avoid cache maintenance on the remaining instructions in the subroutine
that will be executed with the MMU and caches off, and instead branch
into the relocated copy of the kernel image, which has already been
cleaned to the PoC.
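
For reference, the handover sequence that this patch removes (quoted
from the diff below, with illustrative comments added here) derives the
address of its own continuation inside the relocated copy as follows:

	ldr	w0, .Ljmp		// w0 = _text - 0b, i.e. minus the
					// offset of label 0 from the start
					// of the image
	sub	x0, x19, w0, sxtw	// x19 is the relocated Image address,
					// so x0 = x19 - (_text - 0b)
					//       = x19 + (0b - _text)
	br	x0			// continue from the relocated copy
0:
	/* ... instructions executed with the MMU and caches off ... */
.Ljmp:	.long	_text - 0b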

However, this assumes that the copy is executable, which means we expect
EFI_LOADER_DATA regions to be executable as well. That is not a
reasonable assumption to make, even if it is true for most UEFI
implementations today.

So change this back, and add a __clean_dcache_area_poc() call to cover
the remaining code in the subroutine. While at it, switch the other
call site over to __clean_dcache_area_poc() as well, and clean up the
terminology in comments to avoid using 'flush' in the context of cache
maintenance. Also, let's switch to the new style asm annotations.
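
For illustration, the effect of a clean to the PoC over a VA range boils
down to the sketch below. It is not the kernel's implementation (the
real __clean_dcache_area_poc derives the D-cache line size from CTR_EL0
via the dcache_line_size macro); a fixed 64-byte line size is assumed
here to keep it short. __flush_dcache_area performs the same walk with
'dc civac' (clean+invalidate), which is why 'clean' is the more precise
term when no invalidation is needed:

	/* x0 = start address, x1 = size in bytes (illustrative sketch) */
clean_to_poc_sketch:
	add	x1, x0, x1		// x1 = end of the region
	bic	x0, x0, #63		// align start down to a cache line
1:	dc	cvac, x0		// clean this line to the Point of Coherency
	add	x0, x0, #64		// assumed 64-byte line size
	cmp	x0, x1
	b.lo	1b
	dsb	sy			// complete the maintenance before returning
	ret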

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
arch/arm64/kernel/efi-entry.S | 26 +++++++++++++-------------
arch/arm64/kernel/image-vars.h | 4 ++--
2 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 4cfd03c35c49..1a03618df0df 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -12,32 +12,32 @@

__INIT

-ENTRY(efi_enter_kernel)
+SYM_CODE_START(efi_enter_kernel)
/*
* efi_entry() will have copied the kernel image if necessary and we
* end up here with device tree address in x1 and the kernel entry
* point stored in x0. Save those values in registers which are
* callee preserved.
*/
- mov x19, x0 // relocated Image address
+ ldr w2, =stext_offset
+ add x19, x0, x2 // relocated Image entrypoint
mov x20, x1 // DTB address

/*
- * Flush the copied Image to the PoC, and ensure it is not shadowed by
+ * Clean the copied Image to the PoC, and ensure it is not shadowed by
* stale icache entries from before relocation.
*/
ldr w1, =kernel_size
- bl __flush_dcache_area
+ bl __clean_dcache_area_poc
ic ialluis
- dsb sy

/*
- * Jump across, into the copy of the image that we just cleaned
- * to the PoC, so that we can safely disable the MMU and caches.
+ * Clean the remainder of this routine to the PoC
+ * so that we can safely disable the MMU and caches.
*/
- ldr w0, .Ljmp
- sub x0, x19, w0, sxtw
- br x0
+ adr x0, 0f
+ ldr w1, 3f
+ bl __clean_dcache_area_poc
0:
/* Turn off Dcache and MMU */
mrs x0, CurrentEL
@@ -63,6 +63,6 @@ ENTRY(efi_enter_kernel)
mov x1, xzr
mov x2, xzr
mov x3, xzr
- b stext
-ENDPROC(efi_enter_kernel)
-.Ljmp: .long _text - 0b
+ br x19
+SYM_CODE_END(efi_enter_kernel)
+3: .long . - 0b
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 9a7aef0d6f70..7f06ad93fc95 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -13,6 +13,7 @@
#ifdef CONFIG_EFI

__efistub_kernel_size = _edata - _text;
+__efistub_stext_offset = stext - _text;


/*
@@ -34,7 +35,7 @@ __efistub_strnlen = __pi_strnlen;
__efistub_strcmp = __pi_strcmp;
__efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr;
-__efistub___flush_dcache_area = __pi___flush_dcache_area;
+__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;

#ifdef CONFIG_KASAN
__efistub___memcpy = __pi_memcpy;
@@ -43,7 +44,6 @@ __efistub___memset = __pi_memset;
#endif

__efistub__text = _text;
-__efistub_stext = stext;
__efistub__end = _end;
__efistub__edata = _edata;
__efistub_screen_info = screen_info;
--
2.17.1