Subject: Re: [PATCH 53 of 66] add numa awareness to hugepage allocations
> @@ -1655,7 +1672,11 @@ static void collapse_huge_page(struct mm
>  	unsigned long hstart, hend;
> 
>  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
> +#ifndef CONFIG_NUMA
>  	VM_BUG_ON(!*hpage);
> +#else
> +	VM_BUG_ON(*hpage);
> +#endif
> 
>  	/*
>  	 * Prevent all access to pagetables with the exception of
> @@ -1693,7 +1714,15 @@ static void collapse_huge_page(struct mm
>  	if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
>  		goto out;
> 
> +#ifndef CONFIG_NUMA
>  	new_page = *hpage;
> +#else
> +	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address);
> +	if (unlikely(!new_page)) {
> +		*hpage = ERR_PTR(-ENOMEM);
> +		goto out;
> +	}
> +#endif
>  	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
>  		goto out;
>
I think this should be:

	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
#ifdef CONFIG_NUMA
		put_page(new_page);
#endif
		goto out;
	}

Otherwise, in the CONFIG_NUMA case the page just allocated by
alloc_hugepage_vma() is leaked when the memcg charge fails: it is not
reachable via *hpage, and the later error path below already pairs
mem_cgroup_uncharge_page() with put_page() under CONFIG_NUMA for the
same reason.
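For illustration, here is a minimal userspace sketch of the ownership
difference between the two configurations (not kernel code:
alloc_local_page(), charge_page() and release_page() are hypothetical
stand-ins for alloc_hugepage_vma(), mem_cgroup_newpage_charge() and
put_page()):

#include <stdlib.h>
#include <stdio.h>

struct page { int dummy; };

static struct page *alloc_local_page(void)  { return malloc(sizeof(struct page)); }
static int charge_page(struct page *p)      { (void)p; return -1; /* simulate charge failure */ }
static void release_page(struct page *p)    { free(p); }

/*
 * numa == 0: the page is passed in and stays reachable by the caller
 *            (as via *hpage), so bailing out early on failure is fine.
 * numa == 1: the page is allocated here, so a failed charge must
 *            release it before bailing out, or it is leaked.
 */
static int collapse_sketch(int numa, struct page *preallocated)
{
	struct page *new_page;

	if (!numa) {
		new_page = preallocated;
	} else {
		new_page = alloc_local_page();
		if (!new_page)
			return -1;
	}

	if (charge_page(new_page)) {
		if (numa)
			release_page(new_page);	/* the put_page() suggested above */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct page *caller_page = alloc_local_page();

	collapse_sketch(0, caller_page);	/* caller still owns caller_page */
	release_page(caller_page);

	collapse_sketch(1, NULL);		/* would leak without the release_page() above */
	puts("done");
	return 0;
}
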
Thanks,
Daisuke Nishimura.

> @@ -1724,6 +1753,9 @@ static void collapse_huge_page(struct mm
>  		spin_unlock(&mm->page_table_lock);
>  		anon_vma_unlock(vma->anon_vma);
>  		mem_cgroup_uncharge_page(new_page);
> +#ifdef CONFIG_NUMA
> +		put_page(new_page);
> +#endif
>  		goto out;
>  	}
>
> @@ -1759,7 +1791,9 @@ static void collapse_huge_page(struct mm
>  	mm->nr_ptes--;
>  	spin_unlock(&mm->page_table_lock);
> 
> +#ifndef CONFIG_NUMA
>  	*hpage = NULL;
> +#endif
>  	khugepaged_pages_collapsed++;
>  out:
>  	up_write(&mm->mmap_sem);

