Subject: Re: [PATCH 1/5] freepgt: free_pgtables use vma list
Andrew Morton wrote:
> With these six patches the ppc64 is hitting the BUG in exit_mmap():
>
> BUG_ON(mm->nr_ptes); /* This is just debugging */
>
> fairly early in boot.
>

No doubt Hugh will have this fixed before long... but if you
have time to spare, you might just try hitting the code on the
head and making it a bit dumber. It might help show where the
problem is. The patch below:

- doesn't span multiple vmas
- isn't so smart about avoiding unfreeable regions (the resulting
  per-pmd check is sketched just after this list)
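Roughly: instead of trimming the whole range up front, each pmd is
checked individually before its pte page is freed. A minimal
userspace sketch of just that check follows; PMD_SHIFT is
illustrative (x86-64's 2MB pmd) and plain integers stand in for the
kernel types, so this is only the arithmetic, not the patch itself.

/*
 * Sketch of the per-pmd check added to free_pte_range(): only free
 * the pte page when the pmd holding addr starts at or above floor,
 * and [addr, end) stays below the pmd-rounded ceiling.
 */
#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

static int may_free_pte_page(unsigned long addr, unsigned long end,
			     unsigned long floor, unsigned long ceiling)
{
	/* same condition as the patched free_pte_range() below;
	 * the "- 1" keeps ceiling == 0 meaning "no upper limit" */
	return (addr & PMD_MASK) >= floor
		&& end - 1 <= (ceiling & PMD_MASK) - 1;
}

int main(void)
{
	/* range well inside [floor, ceiling): freeable, prints 1 */
	printf("%d\n", may_free_pte_page(0x600000, 0x700000,
					 0x400000, 0xa00000));
	/* floor falls inside this range's pmd: not freeable, prints 0 */
	printf("%d\n", may_free_pte_page(0x600000, 0x700000,
					 0x680000, 0xa00000));
	return 0;
}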

I dunno. Maybe it won't help at all. Maybe it will make things
worse :P
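
For reference, here in isolation is the top-level trimming that the
patch deletes from free_pgd_range(). Again a standalone sketch with
an illustrative PMD_SHIFT and plain integers instead of the kernel
types; the arithmetic is copied from the removed hunk.

/*
 * Trim [addr, end) inwards to whole pmds lying inside
 * [floor, ceiling), as the deleted free_pgd_range() preamble did.
 * Returns 0 when nothing freeable remains.
 */
#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

static int trim_range(unsigned long *addr, unsigned long *end,
		      unsigned long floor, unsigned long ceiling)
{
	*addr &= PMD_MASK;
	if (*addr < floor) {
		*addr += PMD_SIZE;
		if (!*addr)		/* wrapped around zero */
			return 0;
	}
	ceiling &= PMD_MASK;
	if (*end - 1 > ceiling - 1)	/* "- 1": ceiling == 0 is "no limit" */
		*end -= PMD_SIZE;
	if (*addr > *end - 1)
		return 0;
	return 1;
}

int main(void)
{
	unsigned long addr = 0x500000, end = 0x900000;

	if (trim_range(&addr, &end, 0x400000, 0x800000))
		printf("free %#lx..%#lx\n", addr, end);	/* free 0x400000..0x700000 */
	return 0;
}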


Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c	2005-03-22 23:04:34.000000000 +1100
+++ linux-2.6/mm/memory.c	2005-03-22 23:11:31.000000000 +1100
@@ -110,13 +110,18 @@ void pmd_clear_bad(pmd_t *pmd)
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
+static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+		unsigned long addr, unsigned long end,
+		unsigned long floor, unsigned long ceiling)
 {
-	struct page *page = pmd_page(*pmd);
-	pmd_clear(pmd);
-	pte_free_tlb(tlb, page);
-	dec_page_state(nr_page_table_pages);
-	tlb->mm->nr_ptes--;
+	if (((addr & PMD_MASK) >= floor)
+			&& (end - 1 <= (ceiling & PMD_MASK) - 1)) {
+		struct page *page = pmd_page(*pmd);
+		pmd_clear(pmd);
+		pte_free_tlb(tlb, page);
+		dec_page_state(nr_page_table_pages);
+		tlb->mm->nr_ptes--;
+	}
 }
 
 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -133,7 +138,7 @@ static inline void free_pmd_range(struct
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		free_pte_range(tlb, pmd);
+		free_pte_range(tlb, pmd, addr, end, floor, ceiling);
 	} while (pmd++, addr = next, addr != end);
 
 	start &= PUD_MASK;
@@ -190,18 +195,6 @@ void free_pgd_range(struct mmu_gather **
 	unsigned long next;
 	unsigned long start;
 
-	addr &= PMD_MASK;
-	if (addr < floor) {
-		addr += PMD_SIZE;
-		if (!addr)
-			return;
-	}
-	ceiling &= PMD_MASK;
-	if (end - 1 > ceiling - 1)
-		end -= PMD_SIZE;
-	if (addr > end - 1)
-		return;
-
 	start = addr;
 	pgd = pgd_offset((*tlb)->mm, addr);
 	do {
@@ -226,14 +219,6 @@ void free_pgtables(struct mmu_gather **t
 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
 		} else {
-			/*
-			 * Optimization: gather nearby vmas into one call down
-			 */
-			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
-			  && !is_hugepage_only_range(next->vm_start, HPAGE_SIZE)){
-				vma = next;
-				next = vma->vm_next;
-			}
 			free_pgd_range(tlb, addr, vma->vm_end,
 				floor, next? next->vm_start: ceiling);
 		}