Subject: Re: [PATCH 02/16] mm: sl[au]b: Add knowledge of PFMEMALLOC reserve pages
On Mon, 16 Apr 2012, Mel Gorman wrote:

> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 280eabe..0fa2c72 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -1463,6 +1463,7 @@ failed:
>  #define ALLOC_HARDER      0x10 /* try to alloc harder */
>  #define ALLOC_HIGH        0x20 /* __GFP_HIGH set */
>  #define ALLOC_CPUSET      0x40 /* check for correct cpuset */
> +#define ALLOC_PFMEMALLOC  0x80 /* Caller has PF_MEMALLOC set */
>
>  #ifdef CONFIG_FAIL_PAGE_ALLOC
>
> @@ -2208,16 +2209,22 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
>          } else if (unlikely(rt_task(current)) && !in_interrupt())
>                  alloc_flags |= ALLOC_HARDER;
>
> -        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
> -                if (!in_interrupt() &&
> -                    ((current->flags & PF_MEMALLOC) ||
> -                     unlikely(test_thread_flag(TIF_MEMDIE))))
> +        if ((current->flags & PF_MEMALLOC) ||
> +                        unlikely(test_thread_flag(TIF_MEMDIE))) {
> +                alloc_flags |= ALLOC_PFMEMALLOC;
> +
> +                if (likely(!(gfp_mask & __GFP_NOMEMALLOC)) && !in_interrupt())
>                          alloc_flags |= ALLOC_NO_WATERMARKS;
>          }
>
>          return alloc_flags;
>  }
>
> +bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
> +{
> +        return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_PFMEMALLOC);
> +}
> +
>  static inline struct page *
>  __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>          struct zonelist *zonelist, enum zone_type high_zoneidx,
> @@ -2407,8 +2414,16 @@ nopage:
>  got_pg:
>          if (kmemcheck_enabled)
>                  kmemcheck_pagealloc_alloc(page, order, gfp_mask);
> -        return page;
>
> +        /*
> +         * page->pfmemalloc is set when the caller had PFMEMALLOC set or is
> +         * been OOM killed. The expectation is that the caller is taking
> +         * steps that will free more memory. The caller should avoid the
> +         * page being used for !PFMEMALLOC purposes.
> +         */
> +        page->pfmemalloc = !!(alloc_flags & ALLOC_PFMEMALLOC);
> +
> +        return page;
>  }
>
>  /*

I think this is slightly inconsistent if the page allocation succeeded
without needing ALLOC_NO_WATERMARKS, meaning the page was allocated above
the min watermark. That's possible if the slowpath's first call to
get_page_from_freelist() succeeds without needing
__alloc_pages_high_priority(). So perhaps we need to do something like

got_pg_memalloc:
        ...
        page->pfmemalloc = !!(alloc_flags & ALLOC_PFMEMALLOC);
got_pg:
        if (kmemcheck_enabled)
                kmemcheck_pagealloc_alloc(page, order, gfp_mask);
        return page;

and use got_pg_memalloc everywhere we currently use got_pg other than when
it succeeds with ALLOC_NO_WATERMARKS (the two success paths in question are
sketched below).
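
For reference, the two early success paths being talked about look roughly
like this in the current slowpath. This is paraphrased from memory rather
than quoted verbatim from mm/page_alloc.c, so treat the exact argument lists
as approximate; the two goto sites are the ones that would have to pick
between got_pg and got_pg_memalloc:

rebalance:
        /* First attempt honours the watermarks (ALLOC_NO_WATERMARKS masked off) */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
                        preferred_zone, migratetype);
        if (page)
                goto got_pg;    /* allocated above the min watermark */

        /* Only then are the reserves used, if the context allows it */
        if (alloc_flags & ALLOC_NO_WATERMARKS) {
                page = __alloc_pages_high_priority(gfp_mask, order, zonelist,
                                high_zoneidx, nodemask, preferred_zone,
                                migratetype);
                if (page)
                        goto got_pg;    /* may have dipped below the watermark */
        }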

> @@ -2459,6 +2474,8 @@ retry_cpuset:
>                  page = __alloc_pages_slowpath(gfp_mask, order,
>                                  zonelist, high_zoneidx, nodemask,
>                                  preferred_zone, migratetype);
> +        else
> +                page->pfmemalloc = false;
>
>          trace_mm_page_alloc(page, order, gfp_mask, migratetype);
>
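
As an aside, my reading of the new comment added at got_pg is that a
consumer of these pages would end up doing something along the lines of the
sketch below. It is completely untested and only meant to illustrate the
intent; example_alloc_obj() is a made-up name and not anything from the
later sl[au]b patches in the series:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical consumer: refuse reserve-backed pages for ordinary requests */
static void *example_alloc_obj(gfp_t gfp_mask)
{
        struct page *page = alloc_pages(gfp_mask, 0);

        if (!page)
                return NULL;

        /*
         * The page was taken from the PFMEMALLOC reserves but this caller
         * is not entitled to them, so give it back rather than pin a
         * reserve page under an ordinary object.
         */
        if (page->pfmemalloc && !gfp_pfmemalloc_allowed(gfp_mask)) {
                __free_pages(page, 0);
                return NULL;
        }

        return page_address(page);
}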

