From: Yinghai Lu <yinghai@kernel.org>
Subject: [PATCH 11/11] memblock: Move __alloc_memory_core_early() to nobootmem.c
This lets us remove the #ifdef CONFIG_NO_BOOTMEM block in mm/page_alloc.c and
make the function static.
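For reference, here is a minimal sketch (not part of this patch; the function
name and retry loop below are illustrative assumptions) of the kind of caller
inside mm/nobootmem.c that the now-static helper serves, which is why the
declaration in include/linux/mm.h is no longer needed:

/*
 * Illustrative sketch only (not from this patch): a simplified in-file
 * consumer of the now-static helper.  After this change every caller of
 * __alloc_memory_core_early() lives in mm/nobootmem.c, so the symbol no
 * longer needs a declaration in include/linux/mm.h.
 */
static void * __init sketch_alloc_bootmem_nopanic(unsigned long size,
						   unsigned long align,
						   unsigned long goal,
						   unsigned long limit)
{
	void *ptr;

restart:
	/* Any node is fine; the helper returns NULL if memblock has nothing. */
	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);
	if (ptr)
		return ptr;

	/* Relax the preferred lower bound ("goal") once before giving up. */
	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}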

    Signed-off-by: Yinghai Lu <yinghai@kernel.org>
    ---
 include/linux/mm.h |    2 --
 mm/nobootmem.c     |   25 +++++++++++++++++++++++++
 mm/page_alloc.c    |   28 ----------------------------
 3 files changed, 25 insertions(+), 30 deletions(-)

    diff --git a/include/linux/mm.h b/include/linux/mm.h
    index f6385fc..679300c 100644
    --- a/include/linux/mm.h
    +++ b/include/linux/mm.h
@@ -1309,8 +1309,6 @@ int add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid);
 u64 __init find_memory_core_early(int nid, u64 size, u64 align,
 					u64 goal, u64 limit);
-void *__alloc_memory_core_early(int nodeid, u64 size, u64 align,
-				 u64 goal, u64 limit);
 typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
 extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
    diff --git a/mm/nobootmem.c b/mm/nobootmem.c
    index 6a018e4..e2bdb07 100644
    --- a/mm/nobootmem.c
    +++ b/mm/nobootmem.c
@@ -40,6 +40,31 @@ unsigned long max_pfn;
 unsigned long saved_max_pfn;
 #endif
 
+static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+					u64 goal, u64 limit)
+{
+	void *ptr;
+	u64 addr;
+
+	if (limit > memblock.current_limit)
+		limit = memblock.current_limit;
+
+	addr = find_memory_core_early(nid, size, align, goal, limit);
+
+	if (addr == MEMBLOCK_ERROR)
+		return NULL;
+
+	ptr = phys_to_virt(addr);
+	memset(ptr, 0, size);
+	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+	/*
+	 * The min_count is set to 0 so that bootmem allocated blocks
+	 * are never reported as leaks.
+	 */
+	kmemleak_alloc(ptr, size, 0, 0);
+	return ptr;
+}
+
 /*
  * free_bootmem_late - free bootmem pages directly to page allocator
  * @addr: starting address of the range
    diff --git a/mm/page_alloc.c b/mm/page_alloc.c
    index a243a7f..6035136 100644
    --- a/mm/page_alloc.c
    +++ b/mm/page_alloc.c
@@ -3780,34 +3780,6 @@ int __init add_from_early_node_map(struct range *range, int az,
 	return nr_range;
 }
 
-#ifdef CONFIG_NO_BOOTMEM
-void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit)
-{
-	void *ptr;
-	u64 addr;
-
-	if (limit > memblock.current_limit)
-		limit = memblock.current_limit;
-
-	addr = find_memory_core_early(nid, size, align, goal, limit);
-
-	if (addr == MEMBLOCK_ERROR)
-		return NULL;
-
-	ptr = phys_to_virt(addr);
-	memset(ptr, 0, size);
-	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
-	/*
-	 * The min_count is set to 0 so that bootmem allocated blocks
-	 * are never reported as leaks.
-	 */
-	kmemleak_alloc(ptr, size, 0, 0);
-	return ptr;
-}
-#endif
-
-
 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
 {
 	int i;
    --
    1.7.1
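Usage note (hedged, not part of the patch): with the helper now static to
mm/nobootmem.c, early-boot code elsewhere is expected to keep using the
public bootmem interface, which the CONFIG_NO_BOOTMEM implementation layers
on top of the moved function. A sketch of such a call site under that
assumption (the function below is made up for illustration):

#include <linux/bootmem.h>
#include <linux/mm.h>

/*
 * Made-up early-boot call site, for illustration only.  alloc_bootmem()
 * is served by mm/nobootmem.c when CONFIG_NO_BOOTMEM=y and from there by
 * the now-static __alloc_memory_core_early().
 */
static void * __init example_early_table(void)
{
	/* Memory is zeroed by the allocator; alloc_bootmem() panics on failure. */
	return alloc_bootmem(PAGE_SIZE);
}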

