From: Yinghai Lu <yhlu.kernel@gmail.com>
Subject: [PATCH] x86: overmapped fix when 4K pages on tail - 64bit
Date: 9 Jul 2008


    fix phys_pmd_init() so that it does not return a value bigger than end.
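
    For illustration only (a standalone userspace sketch with made-up addresses,
    not kernel code): when the tail of the range is only 4K-aligned, the old code
    returned its loop cursor, which had already been rounded up to the next 2M
    boundary and so could point past end; tracking the last address actually
    mapped reports exactly what was set up.

    #include <stdio.h>

    #define PAGE_SIZE	0x1000UL
    #define PMD_SIZE	0x200000UL

    int main(void)
    {
    	/* made-up 4K-aligned tail, as in the "tail is not big page (2M) alignment" case */
    	unsigned long start = 0x7fe00000UL, end = 0x7ff01000UL;
    	unsigned long address, last_map_addr = end;

    	for (address = start; address < end; address += PMD_SIZE) {
    		unsigned long a;

    		/* pretend the [address, end) piece is mapped with 4K ptes */
    		for (a = address; a < end && a < address + PMD_SIZE; a += PAGE_SIZE)
    			last_map_addr = (a & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
    	}

    	printf("loop cursor (old return): %#lx\n", address);        /* 0x80000000, past end */
    	printf("last mapped (new return): %#lx\n", last_map_addr);  /* 0x7ff01000, == end */
    	return 0;
    }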

    also print out the range split: 1G/2M/4K.
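
    As a standalone sketch (not the kernel code: made-up start/end, the usual
    x86_64 shift values, and a simplified 3-way split that leaves out the 1G
    piece and the NR_RANGE_MR overflow check), the way the ranges are now
    collected with save_mr() and printed looks roughly like this:

    #include <stdio.h>

    #define PAGE_SHIFT	12
    #define PMD_SHIFT	21			/* 2M pages */
    #define PMD_SIZE	(1UL << PMD_SHIFT)

    struct map_range {
    	unsigned long start, end;
    	const char *page;
    };

    static int save_mr(struct map_range *mr, int nr_range,
    		   unsigned long start_pfn, unsigned long end_pfn,
    		   const char *page)
    {
    	if (start_pfn < end_pfn) {
    		mr[nr_range].start = start_pfn << PAGE_SHIFT;
    		mr[nr_range].end   = end_pfn << PAGE_SHIFT;
    		mr[nr_range].page  = page;
    		nr_range++;
    	}
    	return nr_range;
    }

    int main(void)
    {
    	/* made-up range: 1M start (not 2M-aligned), end only 4K-aligned */
    	unsigned long start = 0x100000UL, end = 0x7ff01000UL;
    	unsigned long start_pfn, end_pfn;
    	struct map_range mr[3];
    	int nr_range = 0, i;

    	/* head, not big page alignment: 4K up to the first 2M boundary */
    	start_pfn = start >> PAGE_SHIFT;
    	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
    	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, "4k");

    	/* big page (2M) range down to the last 2M boundary before end */
    	start_pfn = end_pfn;
    	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
    	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, "2M");

    	/* tail is not big page (2M) alignment: 4K for the rest */
    	start_pfn = end_pfn;
    	end_pfn = end >> PAGE_SHIFT;
    	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, "4k");

    	for (i = 0; i < nr_range; i++)
    		printf(" %010lx - %010lx page %s\n",
    		       mr[i].start, mr[i].end, mr[i].page);

    	return 0;
    }

    With these numbers it prints a 4k head, a 2M middle and a 4k tail, in the
    same format as the new KERN_DEBUG lines.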

    Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>

    ---
    arch/x86/mm/init_64.c | 106 +++++++++++++++++++++++++++++++++-----------------
    1 file changed, 72 insertions(+), 34 deletions(-)

    Index: linux-2.6/arch/x86/mm/init_64.c
    ===================================================================
    --- linux-2.6.orig/arch/x86/mm/init_64.c
    +++ linux-2.6/arch/x86/mm/init_64.c
    @@ -302,11 +302,13 @@ static __meminit void unmap_low_page(voi
    early_iounmap(adr, PAGE_SIZE);
    }

    -static void __meminit
    +static unsigned long __meminit
    phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end)
    {
    unsigned pages = 0;
    + unsigned long last_map_addr = end;
    int i;
    +
    pte_t *pte = pte_page + pte_index(addr);

    for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {
    @@ -326,17 +328,20 @@ phys_pte_init(pte_t *pte_page, unsigned
    printk(" pte=%p addr=%lx pte=%016lx\n",
    pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
    set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL));
    + last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
    pages++;
    }
    update_page_count(PG_LEVEL_4K, pages);
    +
    + return last_map_addr;
    }

    -static void __meminit
    +static unsigned long __meminit
    phys_pte_update(pmd_t *pmd, unsigned long address, unsigned long end)
    {
    pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

    - phys_pte_init(pte, address, end);
    + return phys_pte_init(pte, address, end);
    }

    static unsigned long __meminit
    @@ -344,6 +349,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned
    unsigned long page_size_mask)
    {
    unsigned long pages = 0;
    + unsigned long last_map_addr = end;

    int i = pmd_index(address);

    @@ -362,7 +368,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned

    if (pmd_val(*pmd)) {
    if (!pmd_large(*pmd))
    - phys_pte_update(pmd, address, end);
    + last_map_addr = phys_pte_update(pmd, address,
    + end);
    continue;
    }

    @@ -370,17 +377,18 @@ phys_pmd_init(pmd_t *pmd_page, unsigned
    pages++;
    set_pte((pte_t *)pmd,
    pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
    + last_map_addr = (address & PMD_MASK) + PMD_SIZE;
    continue;
    }

    pte = alloc_low_page(&pte_phys);
    - phys_pte_init(pte, address, end);
    + last_map_addr = phys_pte_init(pte, address, end);
    unmap_low_page(pte);

    pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
    }
    update_page_count(PG_LEVEL_2M, pages);
    - return address;
    + return last_map_addr;
    }

    static unsigned long __meminit
    @@ -673,6 +681,32 @@ static unsigned long __init kernel_physi

    return last_map_addr;
    }
    +
    +struct map_range {
    + unsigned long start;
    + unsigned long end;
    + unsigned page_size_mask;
    +};
    +
    +#define NR_RANGE_MR 5
    +
    +static int save_mr(struct map_range *mr, int nr_range,
    + unsigned long start_pfn, unsigned long end_pfn,
    + unsigned long page_size_mask)
    +{
    +
    + if (start_pfn < end_pfn) {
    + if (nr_range >= NR_RANGE_MR)
    + panic("run out of range for init_memory_mapping\n");
    + mr[nr_range].start = start_pfn<<PAGE_SHIFT;
    + mr[nr_range].end = end_pfn<<PAGE_SHIFT;
    + mr[nr_range].page_size_mask = page_size_mask;
    + nr_range++;
    + }
    +
    + return nr_range;
    +}
    +
    /*
    * Setup the direct mapping of the physical memory at PAGE_OFFSET.
    * This runs before bootmem is initialized and gets pages directly from
    @@ -681,10 +715,13 @@ static unsigned long __init kernel_physi
    unsigned long __init_refok init_memory_mapping(unsigned long start,
    unsigned long end)
    {
    - unsigned long last_map_addr = end;
    + unsigned long last_map_addr = 0;
    unsigned long page_size_mask = 0;
    unsigned long start_pfn, end_pfn;

    + struct map_range mr[NR_RANGE_MR];
    + int nr_range, i;
    +
    printk(KERN_INFO "init_memory_mapping\n");

    /*
    @@ -694,24 +731,22 @@ unsigned long __init_refok init_memory_m
    * memory mapped. Unfortunately this is done currently before the
    * nodes are discovered.
    */
    - if (!after_bootmem) {
    + if (!after_bootmem)
    init_gbpages();
    - find_early_table_space(end);
    - }

    if (direct_gbpages)
    page_size_mask |= 1 << PG_LEVEL_1G;
    if (cpu_has_pse)
    page_size_mask |= 1 << PG_LEVEL_2M;

    - /* head if not big page aligment ?*/
    + memset(mr, 0, sizeof(mr));
    + nr_range = 0;
    +
    + /* head if not big page alignment ?*/
    start_pfn = start >> PAGE_SHIFT;
    end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
    << (PMD_SHIFT - PAGE_SHIFT);
    - if (start_pfn < end_pfn)
    - last_map_addr = kernel_physical_mapping_init(
    - start_pfn<<PAGE_SHIFT,
    - end_pfn<<PAGE_SHIFT, 0);
    + nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

    /* big page (2M) range*/
    start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
    @@ -720,37 +755,40 @@ unsigned long __init_refok init_memory_m
    << (PUD_SHIFT - PAGE_SHIFT);
    if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
    end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
    - if (start_pfn < end_pfn)
    - last_map_addr = kernel_physical_mapping_init(
    - start_pfn<<PAGE_SHIFT,
    - end_pfn<<PAGE_SHIFT,
    - page_size_mask & (1<<PG_LEVEL_2M));
    + nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
    + page_size_mask & (1<<PG_LEVEL_2M));

    /* big page (1G) range */
    start_pfn = end_pfn;
    end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
    - if (start_pfn < end_pfn)
    - last_map_addr = kernel_physical_mapping_init(
    - start_pfn<<PAGE_SHIFT,
    - end_pfn<<PAGE_SHIFT,
    - page_size_mask & ((1<<PG_LEVEL_2M)
    - | (1<<PG_LEVEL_1G)));
    + nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
    + page_size_mask &
    + ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));

    /* tail is not big page (1G) alignment */
    start_pfn = end_pfn;
    end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
    - if (start_pfn < end_pfn)
    - last_map_addr = kernel_physical_mapping_init(
    - start_pfn<<PAGE_SHIFT,
    - end_pfn<<PAGE_SHIFT,
    - page_size_mask & (1<<PG_LEVEL_2M));
    + nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
    + page_size_mask & (1<<PG_LEVEL_2M));
    +
    /* tail is not big page (2M) alignment */
    start_pfn = end_pfn;
    end_pfn = end>>PAGE_SHIFT;
    - if (start_pfn < end_pfn)
    + nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
    +
    + for (i = 0; i < nr_range; i++)
    + printk(KERN_DEBUG " %010lx - %010lx page %s\n",
    + mr[i].start, mr[i].end,
    + (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
    + (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
    +
    + if (!after_bootmem)
    + find_early_table_space(end);
    +
    + for (i = 0; i < nr_range; i++)
    last_map_addr = kernel_physical_mapping_init(
    - start_pfn<<PAGE_SHIFT,
    - end_pfn<<PAGE_SHIFT, 0);
    + mr[i].start, mr[i].end,
    + mr[i].page_size_mask);

    if (!after_bootmem)
    mmu_cr4_features = read_cr4();
