Date: Fri, 3 Apr 2015
From: Michal Hocko <mhocko@suse.cz>
Subject: Re: [patch] mm, memcg: sync allocation and memcg charge gfp flags for thp fix fix
On Thu 02-04-15 18:41:18, David Rientjes wrote:
> "mm, memcg: sync allocation and memcg charge gfp flags for THP" in -mm
> introduces a formal to pass the gfp mask for khugepaged's hugepage
> allocation. This is just too ugly to live.
>
> alloc_hugepage_gfpmask() cannot differ between NUMA and UMA configs in
> anything covered by GFP_RECLAIM_MASK, which is the only part of the mask
> that matters for memcg reclaim, so just determine the gfp flags once in
> collapse_huge_page() and avoid the complexity.
>
> Signed-off-by: David Rientjes <rientjes@google.com>

Thanks for this cleanup!

Acked-by: Michal Hocko <mhocko@suse.cz>
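
For anyone following along, the reason the NUMA/UMA difference is
irrelevant here is easiest to see from the helper itself. A sketch of
the 4.0-era definition (quoted from memory, so double-check against
your tree):

	/*
	 * Clear __GFP_WAIT when defrag is disabled; extra_gfp only adds
	 * placement hints such as __GFP_OTHER_NODE on NUMA configs.
	 */
	static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
	{
		return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
	}

The only per-config variation is extra_gfp (__GFP_OTHER_NODE on NUMA,
0 on UMA) plus the __GFP_THISNODE OR-ed in afterwards, and neither of
those bits is in GFP_RECLAIM_MASK, so the memcg charge path behaves
identically either way.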
> ---
> -mm: intended to be folded into
> mm-memcg-sync-allocation-and-memcg-charge-gfp-flags-for-thp.patch
>
> mm/huge_memory.c | 21 ++++++++-------------
> 1 file changed, 8 insertions(+), 13 deletions(-)
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2373,16 +2373,12 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
>  }
>
>  static struct page *
> -khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
> +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
>  		       struct vm_area_struct *vma, unsigned long address,
>  		       int node)
>  {
>  	VM_BUG_ON_PAGE(*hpage, *hpage);
>
> -	/* Only allocate from the target node */
> -	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
> -	       __GFP_THISNODE;
> -
>  	/*
>  	 * Before allocating the hugepage, release the mmap_sem read lock.
>  	 * The allocation can take potentially a long time if it involves
> @@ -2391,7 +2387,7 @@ khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
>  	 */
>  	up_read(&mm->mmap_sem);
>
> -	*hpage = alloc_pages_exact_node(node, *gfp, HPAGE_PMD_ORDER);
> +	*hpage = alloc_pages_exact_node(node, gfp, HPAGE_PMD_ORDER);
>  	if (unlikely(!*hpage)) {
>  		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
>  		*hpage = ERR_PTR(-ENOMEM);
> @@ -2445,18 +2441,13 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
>  }
>
>  static struct page *
> -khugepaged_alloc_page(struct page **hpage, gfp_t *gfp, struct mm_struct *mm,
> +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
>  		       struct vm_area_struct *vma, unsigned long address,
>  		       int node)
>  {
>  	up_read(&mm->mmap_sem);
>  	VM_BUG_ON(!*hpage);
>
> -	/*
> -	 * khugepaged_alloc_hugepage is doing the preallocation, use the same
> -	 * gfp flags here.
> -	 */
> -	*gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), 0);
>  	return *hpage;
>  }
>  #endif
> @@ -2495,8 +2486,12 @@ static void collapse_huge_page(struct mm_struct *mm,
>
>  	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
>
> +	/* Only allocate from the target node */
> +	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
> +	      __GFP_THISNODE;
> +
>  	/* release the mmap_sem read lock. */
> -	new_page = khugepaged_alloc_page(hpage, &gfp, mm, vma, address, node);
> +	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
>  	if (!new_page)
>  		return;
>
>
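
To make the end state concrete: after folding this into the parent
patch, the call site in collapse_huge_page() reduces to roughly the
following (abridged, error handling and unrelated locals trimmed;
exact shape depends on the tree you apply it to):

	gfp_t gfp;

	/* Only allocate from the target node */
	gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
	      __GFP_THISNODE;

	/* release the mmap_sem read lock. */
	new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node);
	if (!new_page)
		return;

	/* charge the memcg with the same reclaim-relevant flags */
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg)))
		return;

Both the page allocation and the memcg charge now observe a single
mask computed in one place, which is the whole point of the cleanup.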

--
Michal Hocko
SUSE Labs