    From:    Ard Biesheuvel <ardb@kernel.org>
    Subject: [PATCH v2 13/16] x86/compressed: move startup32_check_sev_cbit() into .text
    Date:    2022-09-21
    Move startup32_check_sev_cbit() into the .text section and turn it into
    an ordinary function using the ordinary 32-bit calling convention,
    instead of saving/restoring the registers that are known to be live at
    the only call site. This improves maintainability, and makes it possible
    to move this function out of head_64.S and into a separate compilation
    unit that is specific to memory encryption.
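
    For reference, and not part of the patch itself: the ordinary 32-bit
    calling convention lets the callee clobber %eax, %ecx and %edx, while
    %ebx, %esi, %edi and %ebp must be preserved. A minimal hypothetical
    helper following that discipline (example_helper is a made-up name)
    would look like this:

        	.text
        SYM_FUNC_START(example_helper)
        	pushl	%ebx		/* callee-saved: preserve before use */
        	/* body may clobber %eax, %ecx and %edx freely */
        	popl	%ebx		/* restore before returning */
        	RET
        SYM_FUNC_END(example_helper)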

    Note that this requires the call site to be moved before the mixed mode
    check, as %eax will be live otherwise.

    Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
    ---
    arch/x86/boot/compressed/head_64.S | 35 +++++++++++---------
    1 file changed, 19 insertions(+), 16 deletions(-)
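
    Note on the addressing change in the hunks below: with the ordinary
    calling convention, %ebp no longer arrives holding the load address of
    startup_32, which the old rva()-based references relied on, so the
    function computes its own reference point with a call/pop pair. A
    standalone sketch of that idiom (some_symbol is a placeholder, not a
    symbol from this patch):

        	call	0f			/* pushes the address of label 0 */
        0:	popl	%ebp			/* %ebp = runtime address of 0b */
        	movl	(some_symbol - 0b)(%ebp), %eax	/* load some_symbol position-independently */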

    diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
    index abb5a650a816..639f688e4949 100644
    --- a/arch/x86/boot/compressed/head_64.S
    +++ b/arch/x86/boot/compressed/head_64.S
    @@ -251,6 +251,11 @@ SYM_FUNC_START(startup_32)
    movl $__BOOT_TSS, %eax
    ltr %ax

    +#ifdef CONFIG_AMD_MEM_ENCRYPT
    + /* Check if the C-bit position is correct when SEV is active */
    + call startup32_check_sev_cbit
    +#endif
    +
    /*
    * Setup for the jump to 64bit mode
    *
    @@ -268,8 +273,6 @@ SYM_FUNC_START(startup_32)
    leal rva(startup_64_mixedmode)(%ebp), %eax
    1:
    #endif
    - /* Check if the C-bit position is correct when SEV is active */
    - call startup32_check_sev_cbit

    pushl $__KERNEL_CS
    pushl %eax
    @@ -724,16 +727,17 @@ SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
    * succeed. An incorrect C-bit position will map all memory unencrypted, so that
    * the compare will use the encrypted random data and fail.
    */
    - __HEAD
    -SYM_FUNC_START(startup32_check_sev_cbit)
    #ifdef CONFIG_AMD_MEM_ENCRYPT
    - pushl %eax
    + .text
    +SYM_FUNC_START(startup32_check_sev_cbit)
    pushl %ebx
    - pushl %ecx
    - pushl %edx
    + pushl %ebp
    +
    + call 0f
    +0: popl %ebp

    /* Check for non-zero sev_status */
    - movl rva(sev_status)(%ebp), %eax
    + movl (sev_status - 0b)(%ebp), %eax
    testl %eax, %eax
    jz 4f

    @@ -748,17 +752,18 @@ SYM_FUNC_START(startup32_check_sev_cbit)
    jnc 2b

    /* Store to memory and keep it in the registers */
    - movl %eax, rva(sev_check_data)(%ebp)
    - movl %ebx, rva(sev_check_data+4)(%ebp)
    + leal (sev_check_data - 0b)(%ebp), %ebp
    + movl %eax, 0(%ebp)
    + movl %ebx, 4(%ebp)

    /* Enable paging to see if encryption is active */
    movl %cr0, %edx /* Backup %cr0 in %edx */
    movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
    movl %ecx, %cr0

    - cmpl %eax, rva(sev_check_data)(%ebp)
    + cmpl %eax, 0(%ebp)
    jne 3f
    - cmpl %ebx, rva(sev_check_data+4)(%ebp)
    + cmpl %ebx, 4(%ebp)
    jne 3f

    movl %edx, %cr0 /* Restore previous %cr0 */
    @@ -770,13 +775,11 @@ SYM_FUNC_START(startup32_check_sev_cbit)
    jmp 3b

    4:
    - popl %edx
    - popl %ecx
    + popl %ebp
    popl %ebx
    - popl %eax
    -#endif
    RET
    SYM_FUNC_END(startup32_check_sev_cbit)
    +#endif

    /*
    * Stack and heap for uncompression
    --
    2.35.1