    Subject: [tip:x86/numa] x86-32, numa: Make @size in init_alloc_remap() represent bytes
    Commit-ID:  af7c1a6e8374e05aab4a98ce4d2fb07b66506a02
    Gitweb: http://git.kernel.org/tip/af7c1a6e8374e05aab4a98ce4d2fb07b66506a02
    Author: Tejun Heo <tj@kernel.org>
    AuthorDate: Tue, 5 Apr 2011 00:23:52 +0200
    Committer: H. Peter Anvin <hpa@zytor.com>
    CommitDate: Wed, 6 Apr 2011 17:57:11 -0700

    x86-32, numa: Make @size in init_alloc_remap() represent bytes

    The @size variable in init_alloc_remap() is confusing: it starts out as a
    number of bytes, as its name implies, and then becomes a number of pages.
    Make it consistently represent bytes.

    Signed-off-by: Tejun Heo <tj@kernel.org>
    Link: http://lkml.kernel.org/r/1301955840-7246-7-git-send-email-tj@kernel.org
    Acked-by: Yinghai Lu <yinghai@kernel.org>
    Cc: David Rientjes <rientjes@google.com>
    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
    ---
    arch/x86/mm/numa_32.c | 18 +++++++-----------
    1 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 30933fe..99310d2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -286,22 +286,19 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
         size = node_remap_size[nid];
         size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
 
-        /* convert size to large (pmd size) pages, rounding up */
-        size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
-        /* now the roundup is correct, convert to PAGE_SIZE pages */
-        size = size * PTRS_PER_PTE;
+        /* align to large page */
+        size = ALIGN(size, LARGE_PAGE_BYTES);
 
         node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
                                          (u64)node_end_pfn[nid] << PAGE_SHIFT,
-                                         (u64)size << PAGE_SHIFT,
-                                         LARGE_PAGE_BYTES);
+                                         size, LARGE_PAGE_BYTES);
         if (node_pa == MEMBLOCK_ERROR)
                 panic("Can not get kva ram\n");
 
-        node_remap_size[nid] = size;
+        node_remap_size[nid] = size >> PAGE_SHIFT;
         node_remap_offset[nid] = offset;
         printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of node %d at %llx\n",
-               size, nid, node_pa >> PAGE_SHIFT);
+               size >> PAGE_SHIFT, nid, node_pa >> PAGE_SHIFT);
 
         /*
          * prevent kva address below max_low_pfn want it on system
@@ -315,12 +312,11 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
          * So memblock_x86_reserve_range here, hope we don't run out
          * of that array
          */
-        memblock_x86_reserve_range(node_pa, node_pa + ((u64)size << PAGE_SHIFT),
-                                   "KVA RAM");
+        memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
 
         node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
 
-        return size;
+        return size >> PAGE_SHIFT;
 }
 
 static void init_remap_allocator(int nid)
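
For reference, here is a minimal userspace sketch (not kernel code) of why the
conversion is size-preserving. It assumes the non-PAE x86-32 constants
(PAGE_SIZE = 4 KiB, PTRS_PER_PTE = 1024, hence LARGE_PAGE_BYTES = 4 MiB) and a
local ALIGN() macro mirroring the kernel's; the page count produced by the
removed rounding code equals the new byte-aligned @size shifted right by
PAGE_SHIFT, which is what the function now stores in node_remap_size[] and
returns.

/*
 * Standalone illustration; the constants below are assumptions restating
 * the non-PAE x86-32 values, and ALIGN() mirrors the kernel macro.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT       12
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define PTRS_PER_PTE     1024UL
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        for (unsigned long size = 1; size < (64UL << 20); size += 4096 + 123) {
                /* old: round up to large (pmd size) pages, then to 4K pages */
                unsigned long old_pages =
                        (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES * PTRS_PER_PTE;
                /* new: keep @size in bytes, convert to pages only at the end */
                unsigned long new_bytes = ALIGN(size, LARGE_PAGE_BYTES);

                assert((new_bytes >> PAGE_SHIFT) == old_pages);
        }
        printf("old page count == new byte count >> PAGE_SHIFT for all tested sizes\n");
        return 0;
}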
