Date:    Tue, 17 Mar 2009 12:42:52 -0700
From:    Jeremy Fitzhardinge <>
Subject: Re: [crash] Re: Latest brk patchset
H. Peter Anvin wrote:
> Jeremy Fitzhardinge wrote:
>> H. Peter Anvin wrote:
>>> Yinghai Lu wrote:
>>>>> You could also write:
>>>>>
>>>>> PAGE_TABLE_SIZE((-PAGE_OFFSET & 0xffffffff) >> PAGE_SHIFT) << PAGE_SHIFT
>>>>>
>>>>> (the & 0xffffffff is necessary, or you'll get a signed shift.)
>>>>>
>>>> max kernel address space range?
>>>>
>>> Yes, either ((1 << 32)-PAGE_OFFSET) or (-PAGE_OFFSET & 0xffffffff)
>>> should work for that.
>>>
>> The former seems more obvious.
>>
> Hi,
>
> Are you refreshing the patchset?
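As a quick sanity check of that arithmetic, here's a small userspace sketch (not what the kernel runs; it assumes the default 3G/1G split where PAGE_OFFSET is 0xc0000000 and PAGE_SHIFT is 12, whereas in head_32.S these are assembler-time constants):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Assumed: the default 3G/1G split and 4k pages. */
	long long page_offset = 0xc0000000LL;
	int page_shift = 12;

	/* The value we actually want: pages in the lowmem linear-map range. */
	long long want = ((1LL << 32) - page_offset) >> page_shift;

	/*
	 * -PAGE_OFFSET is a negative (64-bit) value, so shifting it right
	 * without the mask keeps it negative (arithmetic shift on the usual
	 * toolchains, and in the assembler); the & 0xffffffff truncates it
	 * to 32 bits first.
	 */
	long long unmasked = -page_offset >> page_shift;
	long long masked = (-page_offset & 0xffffffffLL) >> page_shift;

	printf("want     = %#llx pages\n", (unsigned long long)want);
	printf("unmasked = %lld (bogus)\n", unmasked);
	printf("masked   = %#llx pages\n", (unsigned long long)masked);

	assert(masked == want);		/* both are 0x40000 == 1 GiB / 4k */
	return 0;
}

Both forms come out to the same 0x40000-page (1 GiB) range; the mask only matters because the unmasked negative value stays negative after the shift.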
Here's an updated patch:
From a7029ec1dbff1f32d75c03d45b3bb3497fb3032c Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Date: Mon, 16 Mar 2009 12:07:54 -0700
Subject: [PATCH] x86-32: make sure we map enough to fit linear map pagetables
head_32.S needs to map the kernel itself, and enough space so that mm/init.c can allocate space from the e820 allocator for the linear map of low memory.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c79741c..83e4192 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -38,8 +38,8 @@
 #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
 
 /*
- * This is how much memory *in addition to the memory covered up to
- * and including _end* we need mapped initially.
+ * This is how much memory in addition to the memory covered up to
+ * and including _end we need mapped initially.
  * We need:
  *  (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
  *  (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
@@ -52,19 +52,28 @@
  * KERNEL_IMAGE_SIZE should be greater than pa(_end)
  * and small than max_low_pfn, otherwise will waste some page table entries
  */
-LOW_PAGES = (KERNEL_IMAGE_SIZE + PAGE_SIZE_asm - 1)>>PAGE_SHIFT
 
 #if PTRS_PER_PMD > 1
-PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD
+#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
 #else
-PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PGD)
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
 #endif
 ALLOCATOR_SLOP = 4
 
-INIT_MAP_SIZE = (PAGE_TABLE_SIZE + ALLOCATOR_SLOP) * PAGE_SIZE_asm
-RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+/* Enough space to fit pagetables for the low memory linear map */
+MAPPING_BEYOND_END = (PAGE_TABLE_SIZE(((1 << 32) - __PAGE_OFFSET) / PAGE_SIZE) * PAGE_SIZE)
 
 /*
+ * Worst-case size of the kernel mapping we need to make:
+ * the worst-case size of the kernel itself, plus the extra we need
+ * to map for the linear map.
+ */
+KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
+
+INIT_MAP_SIZE = (PAGE_TABLE_SIZE(KERNEL_PAGES) + ALLOCATOR_SLOP) * PAGE_SIZE_asm
+RESERVE_BRK(pagetables, INIT_MAP_SIZE)
+
+/*
  * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
  * %esi points to the real-mode code as a 32-bit pointer.
  * CS and DS must be 4 GB flat segments, but we don't depend on
@@ -197,9 +206,9 @@ default_entry:
 	loop 11b
 
 	/*
-	 * End condition: we must map up to the end.
+	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
 	 */
-	movl $pa(_end) + PTE_IDENT_ATTR, %ebp
+	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
 	cmpl %ebp,%eax
 	jb 10b
 1:
@@ -229,9 +238,9 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 	addl $0x1000,%eax
 	loop 11b
 	/*
-	 * End condition: we must map up to end
+	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
 	 */
-	movl $pa(_end) + PTE_IDENT_ATTR, %ebp
+	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
 	cmpl %ebp,%eax
 	jb 10b
 	addl $__PAGE_OFFSET, %edi
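FWIW, here's a rough feel for what this ends up reserving. This is only a userspace sketch of the same arithmetic, assuming the default 3G/1G split (__PAGE_OFFSET = 0xc0000000), 4k pages, and a 512 MiB KERNEL_IMAGE_SIZE; those are config-dependent, so treat the numbers as illustrative:

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1u << PAGE_SHIFT)
#define ALLOCATOR_SLOP		4

/* Assumed config values: default 3G/1G split, 512 MiB kernel image limit. */
#define PAGE_OFFSET		0xc0000000ULL
#define KERNEL_IMAGE_SIZE	(512ULL * 1024 * 1024)

/* Mirror of the PAGE_TABLE_SIZE() macro added by the patch. */
static unsigned long long page_table_size(unsigned long long pages,
					  int ptrs_per_pmd, int ptrs_per_pgd)
{
	if (ptrs_per_pmd > 1)				/* PAE */
		return pages / ptrs_per_pmd + ptrs_per_pgd;
	return pages / ptrs_per_pgd;			/* non-PAE */
}

static void report(const char *name, int ptrs_per_pmd, int ptrs_per_pgd)
{
	unsigned long long lowmem_pages = ((1ULL << 32) - PAGE_OFFSET) / PAGE_SIZE;
	unsigned long long mapping_beyond_end =
		page_table_size(lowmem_pages, ptrs_per_pmd, ptrs_per_pgd) * PAGE_SIZE;
	unsigned long long kernel_pages =
		(KERNEL_IMAGE_SIZE + mapping_beyond_end) >> PAGE_SHIFT;
	unsigned long long init_map_size =
		(page_table_size(kernel_pages, ptrs_per_pmd, ptrs_per_pgd) +
		 ALLOCATOR_SLOP) * PAGE_SIZE;

	printf("%s: MAPPING_BEYOND_END = %llu KiB, INIT_MAP_SIZE = %llu KiB\n",
	       name, mapping_beyond_end >> 10, init_map_size >> 10);
}

int main(void)
{
	report("non-PAE", 1, 1024);	/* 2-level paging */
	report("PAE    ", 512, 4);	/* 3-level paging */
	return 0;
}

With those assumptions it works out to roughly 1 MiB of extra mapping beyond _end (about 2 MiB with PAE), and roughly 528 KiB (about 1 MiB with PAE) of brk reserved for the early pagetables.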