Subject: [PATCH 10/25] Calculate the alloc_flags for allocation only once
Factor out the mapping between GFP flags and alloc_flags into a single
helper. Once factored out, the alloc_flags need only be calculated once
per allocation attempt, but some care must be taken.
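
For illustration, a stand-alone sketch of the mapping (not part of the
patch). The constants are simplified copies of the 2.6.29-era values in
linux/gfp.h and mm/page_alloc.c, GFP_KERNEL is reduced to __GFP_WAIT, and
the rt_task()/PF_MEMALLOC/TIF_MEMDIE cases are omitted because they
depend on current:

#include <stdio.h>

/* Simplified copies of the kernel constants, for illustration only */
#define __GFP_WAIT      0x10u
#define __GFP_HIGH      0x20u
#define GFP_ATOMIC      (__GFP_HIGH)
#define GFP_KERNEL      (__GFP_WAIT)    /* __GFP_IO/__GFP_FS omitted */

#define ALLOC_WMARK_MIN 0x02
#define ALLOC_HARDER    0x10
#define ALLOC_HIGH      0x20
#define ALLOC_CPUSET    0x40

static int gfp_to_alloc_flags(unsigned int gfp_mask)
{
        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

        if (gfp_mask & __GFP_HIGH)
                alloc_flags |= ALLOC_HIGH;
        if (!(gfp_mask & __GFP_WAIT)) {
                /* atomic: try harder, ignore cpuset rather than fail */
                alloc_flags |= ALLOC_HARDER;
                alloc_flags &= ~ALLOC_CPUSET;
        }
        return alloc_flags;
}

int main(void)
{
        /* prints 0x32: WMARK_MIN | HARDER | HIGH, CPUSET cleared */
        printf("GFP_ATOMIC -> %#x\n", (unsigned)gfp_to_alloc_flags(GFP_ATOMIC));
        /* prints 0x42: WMARK_MIN | CPUSET */
        printf("GFP_KERNEL -> %#x\n", (unsigned)gfp_to_alloc_flags(GFP_KERNEL));
        return 0;
}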

[neilb@suse.de says]
As the test:

-        if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
-                        && !in_interrupt()) {
-                if (!(gfp_mask & __GFP_NOMEMALLOC)) {

has been replaced with a slightly weaker one:

+        if (alloc_flags & ALLOC_NO_WATERMARKS) {

we need to ensure we don't recurse when PF_MEMALLOC is set.
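
For reference, direct reclaim itself runs with PF_MEMALLOC set, which is
why the explicit bail-out before entering reclaim is needed. A trimmed
sketch of the existing __alloc_pages_direct_reclaim() helper (illustration
only; details elided):

        struct task_struct *p = current;

        /* Mark the task as being in reclaim ... */
        p->flags |= PF_MEMALLOC;
        *did_some_progress = try_to_free_pages(zonelist, order,
                                                gfp_mask, nodemask);
        /* ... and clear it again once reclaim completes */
        p->flags &= ~PF_MEMALLOC;

An allocation attempted inside that window now qualifies for
ALLOC_NO_WATERMARKS, so without the PF_MEMALLOC check it could call back
into direct reclaim.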

From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
---
 mm/page_alloc.c | 88 +++++++++++++++++++++++++++++++-----------------------
 1 files changed, 50 insertions(+), 38 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8771de3..0558eb4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1593,16 +1593,6 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
         return page;
 }
 
-static inline int
-is_allocation_high_priority(struct task_struct *p, gfp_t gfp_mask)
-{
-        if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
-                        && !in_interrupt())
-                if (!(gfp_mask & __GFP_NOMEMALLOC))
-                        return 1;
-        return 0;
-}
-
 /*
  * This is called in the allocator slow-path if the allocation request is of
  * sufficient urgency to ignore watermarks and take other desperate measures
@@ -1638,6 +1628,42 @@ void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
                 wakeup_kswapd(zone, order);
 }
 
+static inline int
+gfp_to_alloc_flags(gfp_t gfp_mask)
+{
+        struct task_struct *p = current;
+        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+        const gfp_t wait = gfp_mask & __GFP_WAIT;
+
+        /*
+         * The caller may dip into page reserves a bit more if the caller
+         * cannot run direct reclaim, or if the caller has realtime scheduling
+         * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
+         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+         */
+        if (gfp_mask & __GFP_HIGH)
+                alloc_flags |= ALLOC_HIGH;
+
+        if (!wait) {
+                alloc_flags |= ALLOC_HARDER;
+                /*
+                 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+                 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+                 */
+                alloc_flags &= ~ALLOC_CPUSET;
+        } else if (unlikely(rt_task(p)) && !in_interrupt())
+                alloc_flags |= ALLOC_HARDER;
+
+        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
+                if (!in_interrupt() &&
+                    ((p->flags & PF_MEMALLOC) ||
+                     unlikely(test_thread_flag(TIF_MEMDIE))))
+                        alloc_flags |= ALLOC_NO_WATERMARKS;
+        }
+
+        return alloc_flags;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -1668,48 +1694,34 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
          * OK, we're below the kswapd watermark and have kicked background
          * reclaim. Now things get more complex, so set up alloc_flags according
          * to how we want to proceed.
-         *
-         * The caller may dip into page reserves a bit more if the caller
-         * cannot run direct reclaim, or if the caller has realtime scheduling
-         * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
-         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
          */
-        alloc_flags = ALLOC_WMARK_MIN;
-        if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
-                alloc_flags |= ALLOC_HARDER;
-        if (gfp_mask & __GFP_HIGH)
-                alloc_flags |= ALLOC_HIGH;
-        if (wait)
-                alloc_flags |= ALLOC_CPUSET;
+        alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
 restart:
-        /*
-         * Go through the zonelist again. Let __GFP_HIGH and allocations
-         * coming from realtime tasks go deeper into reserves.
-         *
-         * This is the last chance, in general, before the goto nopage.
-         * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
-         */
+        /* This is the last chance, in general, before the goto nopage. */
         page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
-                                        high_zoneidx, alloc_flags,
-                                        preferred_zone,
-                                        migratetype);
+                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
+                        preferred_zone, migratetype);
         if (page)
                 goto got_pg;
 
         /* Allocate without watermarks if the context allows */
-        if (is_allocation_high_priority(p, gfp_mask))
+        if (alloc_flags & ALLOC_NO_WATERMARKS) {
                 page = __alloc_pages_high_priority(gfp_mask, order,
-                        zonelist, high_zoneidx, nodemask, preferred_zone,
-                        migratetype);
-        if (page)
-                goto got_pg;
+                        zonelist, high_zoneidx, nodemask,
+                        preferred_zone, migratetype);
+                if (page)
+                        goto got_pg;
+        }
 
         /* Atomic allocations - we can't balance anything */
         if (!wait)
                 goto nopage;
 
+        /* Avoid recursion of direct reclaim */
+        if (p->flags & PF_MEMALLOC)
+                goto nopage;
+
         /* Try direct reclaim and then allocating */
         page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                         zonelist, high_zoneidx,
-- 
1.5.6.5

