    Subject: Re: [Linaro-mm-sig] [PATCHv23 12/16] mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks
    2012/2/23 Marek Szyprowski <m.szyprowski@samsung.com>:
    > alloc_contig_range() performs memory allocation, so it should also
    > maintain the correct memory watermark levels. This commit adds a call
    > to *_slowpath-style reclaim to grab enough pages to make sure that the
    > final collection of contiguous pages from the freelists will not
    > starve the system.
    >
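    (A quick gloss on the mechanism described above: the patch raises the
    zone watermarks by the number of pages being allocated and reclaims
    until the new level is met, so the final grab from the freelists does
    not push the zone below its normal watermarks. A minimal sketch of
    that call flow, with 'count' standing for the size of the requested
    range and names simplified, not the literal patch:

        /* sketch: reserve, reclaim, allocate, release */
        __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, count);
        /* raises min/low/high by 'count' via __update_cma_watermarks()
         * and reclaims until low_wmark + count free pages are available */

        /* ... isolate and take 'count' pages off the freelists ... */

        __update_cma_watermarks(zone, -count);
        /* drop the temporary reservation once the allocation is done */

    The release step is presumably in the part of the patch not quoted
    here.)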
    > Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
    > Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
    > CC: Michal Nazarewicz <mina86@mina86.com>
    > Tested-by: Rob Clark <rob.clark@linaro.org>
    > Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
    > Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
    > Tested-by: Robert Nelson <robertcnelson@gmail.com>
    > ---
    >  include/linux/mmzone.h |    9 +++++++
    >  mm/page_alloc.c        |   62 ++++++++++++++++++++++++++++++++++++++++++++++++
    >  2 files changed, 71 insertions(+), 0 deletions(-)
    >
    > diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
    > index 4781f30..77db8c0 100644
    > --- a/include/linux/mmzone.h
    > +++ b/include/linux/mmzone.h
    > @@ -63,8 +63,10 @@ enum {
    >
    >  #ifdef CONFIG_CMA
    >  #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
    > +#  define cma_wmark_pages(zone)        zone->min_cma_pages
    >  #else
    >  #  define is_migrate_cma(migratetype) false
    > +#  define cma_wmark_pages(zone) 0
    >  #endif
    >
    >  #define for_each_migratetype_order(order, type) \
    > @@ -371,6 +373,13 @@ struct zone {
    >        /* see spanned/present_pages for more description */
    >        seqlock_t               span_seqlock;
    >  #endif
    > +#ifdef CONFIG_CMA
    > +       /*
    > +        * CMA needs to increase watermark levels during the allocation
    > +        * process to make sure that the system is not starved.
    > +        */
    > +       unsigned long           min_cma_pages;
    > +#endif
    >        struct free_area        free_area[MAX_ORDER];
    >
    >  #ifndef CONFIG_SPARSEMEM
    > diff --git a/mm/page_alloc.c b/mm/page_alloc.c
    > index 7a0d286..39cd74f 100644
    > --- a/mm/page_alloc.c
    > +++ b/mm/page_alloc.c
    > @@ -5092,6 +5092,11 @@ static void __setup_per_zone_wmarks(void)
    >                                        low + (min >> 2);
    >                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
    >                                        low + (min >> 1);
    > +
    > +               zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
    > +               zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
    > +               zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
    > +
    >                setup_zone_migrate_reserve(zone);
    >                spin_unlock_irqrestore(&zone->lock, flags);
    >        }
    > @@ -5695,6 +5700,56 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
    >        return ret > 0 ? 0 : ret;
    >  }
    >
    > +/*
    > + * Update zone's cma pages counter used for watermark level calculation.
    > + */
    > +static inline void __update_cma_watermarks(struct zone *zone, int count)
    > +{
    > +       unsigned long flags;
    > +       spin_lock_irqsave(&zone->lock, flags);
    > +       zone->min_cma_pages += count;
    > +       spin_unlock_irqrestore(&zone->lock, flags);
    > +       setup_per_zone_wmarks();
    > +}
    > +
    > +/*
    > + * Trigger memory pressure bump to reclaim some pages in order to be able to
    > + * allocate 'count' pages in single page units. Does similar work to
    > + * the __alloc_pages_slowpath() function.
    > + */
    > +static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
    > +{
    > +       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
    > +       struct zonelist *zonelist = node_zonelist(0, gfp_mask);
    > +       int did_some_progress = 0;
    > +       int order = 1;
    > +       unsigned long watermark;
    > +
    > +       /*
    > +        * Increase the watermark levels to force kswapd to do its job
    > +        * and stabilise at the new watermark level.
    > +        */
    > +       __update_cma_watermarks(zone, count);
    > +
    > +       /* Obey watermarks as if the page was being allocated */
    > +       watermark = low_wmark_pages(zone) + count;
    > +       while (!zone_watermark_ok(zone, 0, watermark, 0, 0)) {
    > +               wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
    > +
    > +               did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
    > +                                                     NULL);
    > +               if (!did_some_progress) {
    > +                       /* Exhausted what can be done so it's blamo time */
    > +                       out_of_memory(zonelist, gfp_mask, order, NULL);

    out_of_memory() has gained another parameter in the latest next/master
    tree; out_of_memory(zonelist, gfp_mask, order, NULL, false) should be OK.
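    For illustration, with that change the tail of the quoted hunk would
    look something like this (a sketch against next/master at the time;
    as far as I can tell the new boolean is the force_kill flag, and
    false is the right value on an ordinary allocation path):

        if (!did_some_progress) {
                /* Exhausted what can be done so it's blamo time */
                out_of_memory(zonelist, gfp_mask, order, NULL, false);
        }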

    -barry
    --
