    Subject: [PATCH 3/6] x86/mm: Factor out top-down direct mapping setup
    From: Tang Chen <tangchen@cn.fujitsu.com>

    This patch introduces a new function, memory_map_top_down(), to
    factor out the top-down direct memory mapping pagetable setup.
    This is also a preparation for the following patch, which will
    introduce bottom-up memory mapping. That is, the two ways of
    setting up the pagetables are put into separate functions, and
    init_mem_mapping() chooses which one to use, which makes the
    code clearer.
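
    As a forward-looking sketch (not part of this patch: the
    memory_map_bottom_up() name and the memblock_bottom_up() check are
    assumptions about the follow-up patch), init_mem_mapping() could end
    up choosing between the two paths roughly like this:

        /* hypothetical shape of init_mem_mapping() after the bottom-up patch */
        if (memblock_bottom_up()) {
                /* allocate pagetables from lower memory and map upwards */
                memory_map_bottom_up(ISA_END_ADDRESS, end);
        } else {
                /* current behaviour: pagetables near the end of RAM, top-down */
                memory_map_top_down(ISA_END_ADDRESS, end);
        }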

    Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
    Signed-off-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
    ---
    arch/x86/mm/init.c | 70 ++++++++++++++++++++++++++++++++-------------------
    1 files changed, 44 insertions(+), 26 deletions(-)

    diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
    index 04664cd..73e79e6 100644
    --- a/arch/x86/mm/init.c
    +++ b/arch/x86/mm/init.c
    @@ -401,27 +401,27 @@ static unsigned long __init init_range_memory_mapping(

    /* (PUD_SHIFT-PMD_SHIFT)/2 */
    #define STEP_SIZE_SHIFT 5
    -void __init init_mem_mapping(void)
    +
    +/**
    + * memory_map_top_down - Map [map_start, map_end) top down
    + * @map_start: start address of the target memory range
    + * @map_end: end address of the target memory range
    + *
    + * This function will set up direct mapping for memory range [map_start, map_end)
    + * in a heuristic way. In the beginning, step_size is small; the more memory we
    + * map, the larger step_size grows and the more we can map in the next loop.
    + */
    +static void __init memory_map_top_down(unsigned long map_start,
    + unsigned long map_end)
    {
    - unsigned long end, real_end, start, last_start;
    + unsigned long real_end, start, last_start;
    unsigned long step_size;
    unsigned long addr;
    unsigned long mapped_ram_size = 0;
    unsigned long new_mapped_ram_size;

    - probe_page_size_mask();
    -
    -#ifdef CONFIG_X86_64
    - end = max_pfn << PAGE_SHIFT;
    -#else
    - end = max_low_pfn << PAGE_SHIFT;
    -#endif
    -
    - /* the ISA range is always mapped regardless of memory holes */
    - init_memory_mapping(0, ISA_END_ADDRESS);
    -
    /* xen has big range in reserved near end of ram, skip it at first.*/
    - addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
    + addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
    real_end = addr + PMD_SIZE;

    /* step_size need to be small so pgt_buf from BRK could cover it */
    @@ -430,19 +430,13 @@ void __init init_mem_mapping(void)
    min_pfn_mapped = real_end >> PAGE_SHIFT;
    last_start = start = real_end;

    - /*
    - * We start from the top (end of memory) and go to the bottom.
    - * The memblock_find_in_range() gets us a block of RAM from the
    - * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
    - * for page table.
    - */
    - while (last_start > ISA_END_ADDRESS) {
    + while (last_start > map_start) {
    if (last_start > step_size) {
    start = round_down(last_start - 1, step_size);
    - if (start < ISA_END_ADDRESS)
    - start = ISA_END_ADDRESS;
    + if (start < map_start)
    + start = map_start;
    } else
    - start = ISA_END_ADDRESS;
    + start = map_start;
    new_mapped_ram_size = init_range_memory_mapping(start,
    last_start);
    last_start = start;
    @@ -453,8 +447,32 @@ void __init init_mem_mapping(void)
    mapped_ram_size += new_mapped_ram_size;
    }

    - if (real_end < end)
    - init_range_memory_mapping(real_end, end);
    + if (real_end < map_end)
    + init_range_memory_mapping(real_end, map_end);
    +}
    +
    +void __init init_mem_mapping(void)
    +{
    + unsigned long end;
    +
    + probe_page_size_mask();
    +
    +#ifdef CONFIG_X86_64
    + end = max_pfn << PAGE_SHIFT;
    +#else
    + end = max_low_pfn << PAGE_SHIFT;
    +#endif
    +
    + /* the ISA range is always mapped regardless of memory holes */
    + init_memory_mapping(0, ISA_END_ADDRESS);
    +
    + /*
    + * We start from the top (end of memory) and go to the bottom.
    + * The memblock_find_in_range() gets us a block of RAM from the
    + * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
    + * for page table.
    + */
    + memory_map_top_down(ISA_END_ADDRESS, end);

    #ifdef CONFIG_X86_64
    if (max_pfn > max_low_pfn) {
    --
    1.7.1
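
    The step_size heuristic that the new memory_map_top_down() kerneldoc
    describes can be illustrated with a small standalone C sketch. It assumes
    step_size starts at PMD_SIZE (2 MiB on x86_64) and is shifted left by
    STEP_SIZE_SHIFT (5) after each round that maps more memory than the rounds
    before it; that growth rule is not fully visible in the hunks above, so
    treat the numbers as illustrative only:

        #include <stdio.h>

        int main(void)
        {
                /* assumed initial step_size: PMD_SIZE = 2 MiB on x86_64 */
                unsigned long long step_size = 2ULL << 20;
                const int step_size_shift = 5;  /* (PUD_SHIFT-PMD_SHIFT)/2 */
                int round;

                /* each successful round lets the next one cover 32x more */
                for (round = 1; round <= 4; round++) {
                        printf("round %d: chunk size up to %llu MiB\n",
                               round, step_size >> 20);
                        step_size <<= step_size_shift;
                }
                return 0;
        }

    This prints 2, 64, 2048 and 65536 MiB: the chunks mapped per round grow
    quickly once enough mapped memory is available to hold new pagetable pages.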
