Subject: [PATCH 5.10 012/101] arm64: Force NO_BLOCK_MAPPINGS if crashkernel reservation is required
Date: 2021-06-28
From: Catalin Marinas <catalin.marinas@arm.com>

commit 2687275a5843d1089687f08fc64eb3f3b026a169 upstream.

mem_init() currently relies on knowing the boundaries of the crashkernel
reservation in order to map that region with page granularity for later
unmapping via set_memory_valid(..., 0). If the crashkernel reservation is
deferred, those boundaries are not known when the linear mapping is
created. Instead, simply parse the command line for "crashkernel" and, if
it is present, create the linear map with NO_BLOCK_MAPPINGS.
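
For context, the "later unmapping" mentioned above happens once a crash
kernel has been loaded: arm64 invalidates the reservation's linear-map
entries page by page, which only works if the region is mapped at page
granularity. A simplified sketch of that consumer, paraphrased from
arch/arm64/kernel/machine_kexec.c (illustrative only, not the verbatim
implementation):

	/*
	 * Sketch only: invalidate the crashkernel reservation in the
	 * linear map after the crash kernel has been loaded.
	 * set_memory_valid() flips individual PTEs, so the range must
	 * not be covered by block (PMD/PUD) mappings.
	 */
	void arch_kexec_protect_crashkres(void)
	{
		set_memory_valid(__phys_to_virt(crashk_res.start),
				 resource_size(&crashk_res) >> PAGE_SHIFT, 0);
	}

The trade-off of this patch is that passing "crashkernel=" now forces the
whole linear map to page granularity (NO_BLOCK_MAPPINGS |
NO_CONT_MAPPINGS), not just the reserved region, because the reservation
boundaries are not yet known when map_mem() runs.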

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Reviewed-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Acked-by: James Morse <james.morse@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Link: https://lore.kernel.org/r/20201119175556.18681-1-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/arm64/mm/mmu.c | 37 ++++++++++++++++---------------------
 1 file changed, 16 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index afdad7607850..58dc93e56617 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -469,6 +469,21 @@ void __init mark_linear_text_alias_ro(void)
 			    PAGE_KERNEL_RO);
 }
 
+static bool crash_mem_map __initdata;
+
+static int __init enable_crash_mem_map(char *arg)
+{
+	/*
+	 * Proper parameter parsing is done by reserve_crashkernel(). We only
+	 * need to know if the linear map has to avoid block mappings so that
+	 * the crashkernel reservations can be unmapped later.
+	 */
+	crash_mem_map = true;
+
+	return 0;
+}
+early_param("crashkernel", enable_crash_mem_map);
+
 static void __init map_mem(pgd_t *pgdp)
 {
 	phys_addr_t kernel_start = __pa_symbol(_text);
@@ -477,7 +492,7 @@ static void __init map_mem(pgd_t *pgdp)
 	int flags = 0;
 	u64 i;
 
-	if (rodata_full || debug_pagealloc_enabled())
+	if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -487,11 +502,6 @@ static void __init map_mem(pgd_t *pgdp)
 	 * the following for-loop
 	 */
 	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
-#ifdef CONFIG_KEXEC_CORE
-	if (crashk_res.end)
-		memblock_mark_nomap(crashk_res.start,
-				    resource_size(&crashk_res));
-#endif
 
 	/* map all the memory banks */
 	for_each_mem_range(i, &start, &end) {
@@ -519,21 +529,6 @@ static void __init map_mem(pgd_t *pgdp)
 	__map_memblock(pgdp, kernel_start, kernel_end,
 		       PAGE_KERNEL, NO_CONT_MAPPINGS);
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
-
-#ifdef CONFIG_KEXEC_CORE
-	/*
-	 * Use page-level mappings here so that we can shrink the region
-	 * in page granularity and put back unused memory to buddy system
-	 * through /sys/kernel/kexec_crash_size interface.
-	 */
-	if (crashk_res.end) {
-		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
-			       PAGE_KERNEL,
-			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
-		memblock_clear_nomap(crashk_res.start,
-				     resource_size(&crashk_res));
-	}
-#endif
 }
 
 void mark_rodata_ro(void)
-- 
2.30.2