Date: 25 Feb 2011
From: Yinghai Lu <yinghai@kernel.org>
Subject: [PATCH 2/3] x86,mm: Introduce init_memory_mapping_ext()

Add an extra parameter, tbl_end, which may be smaller than end.

This prepares for init_memory_mapping_high(), which aligns boundaries
to 1G: end may be rounded up to a 1G boundary and thus become larger
than the original node end.

init_memory_mapping() calls init_memory_mapping_ext() with tbl_end = end.
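
For illustration, a minimal sketch of how a later caller such as
init_memory_mapping_high() might use the new parameter; the helper
name map_node_high() and the 1G roundup are assumptions for this
example, not part of the patch:

static void __init map_node_high(unsigned long node_start,
				 unsigned long node_end)
{
	/* end may round up past node_end once aligned to 1G... */
	unsigned long end = roundup(node_end, 1UL << 30);

	/*
	 * ...so pass the real node_end as tbl_end, keeping the
	 * page-table allocation inside memory that actually exists.
	 */
	init_memory_mapping_ext(node_start, end, node_end);
}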

-v2: updated after page_size_mask change

Signed-off-by: Yinghai Lu <yinghai@kernel.org>

---
arch/x86/include/asm/page_types.h | 7 +++++--
arch/x86/mm/init.c | 21 ++++++++++++++-------
2 files changed, 19 insertions(+), 9 deletions(-)

Index: linux-2.6/arch/x86/include/asm/page_types.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/page_types.h
+++ linux-2.6/arch/x86/include/asm/page_types.h
@@ -51,8 +51,11 @@ static inline phys_addr_t get_max_mapped
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}

-extern unsigned long init_memory_mapping(unsigned long start,
- unsigned long end);
+unsigned long init_memory_mapping_ext(unsigned long start,
+ unsigned long end,
+ unsigned long tbl_end);
+
+unsigned long init_memory_mapping(unsigned long start, unsigned long end);

void init_memory_mapping_high(void);

Index: linux-2.6/arch/x86/mm/init.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/init.c
+++ linux-2.6/arch/x86/mm/init.c
@@ -32,9 +32,10 @@ int direct_gbpages

int page_size_mask = -1;

-static void __init find_early_table_space(unsigned long end)
+static void __init find_early_table_space(unsigned long end,
+ unsigned long tbl_end)
{
- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+ unsigned long puds, pmds, ptes, tables, start = 0;
phys_addr_t base;

puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
@@ -67,10 +68,10 @@ static void __init find_early_table_spac
/* for fixmap */
tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);

- good_end = max_pfn_mapped << PAGE_SHIFT;
+ tbl_end = max_pfn_mapped << PAGE_SHIFT;
#endif

- base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
+ base = memblock_find_in_range(start, tbl_end, tables, PAGE_SIZE);
if (base == MEMBLOCK_ERROR)
panic("Cannot find space for the kernel page tables");

@@ -144,8 +145,9 @@ static int __meminit save_mr(struct map_
* This runs before bootmem is initialized and gets pages directly from
* the physical memory. To access them they are temporarily mapped.
*/
-unsigned long __init_refok init_memory_mapping(unsigned long start,
- unsigned long end)
+unsigned long __init_refok init_memory_mapping_ext(unsigned long start,
+ unsigned long end,
+ unsigned long tbl_end)
{
unsigned long start_pfn, end_pfn;
unsigned long ret = 0;
@@ -260,7 +262,7 @@ unsigned long __init_refok init_memory_m
* nodes are discovered.
*/
if (!after_bootmem)
- find_early_table_space(end);
+ find_early_table_space(end, tbl_end);

for (i = 0; i < nr_range; i++)
ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
@@ -284,6 +286,11 @@ unsigned long __init_refok init_memory_m
return ret >> PAGE_SHIFT;
}

+unsigned long __init_refok init_memory_mapping(unsigned long start,
+ unsigned long end)
+{
+ return init_memory_mapping_ext(start, end, end);
+}

/*
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
