From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Subject: [RFC 15/16] mm/cma: remove ALLOC_CMA
Date: Thu, 12 Feb 2015
Now that reserved pages for CMA are on ZONE_CMA, which serves only
MIGRATE_MOVABLE allocations, there is no need to consider ALLOC_CMA
at all. Remove it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
---
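As a quick illustration of why the correction term can go away: before
ZONE_CMA, free CMA pages were mixed into a normal zone's free page count,
so __zone_watermark_ok() had to subtract NR_FREE_CMA_PAGES whenever
ALLOC_CMA was not set. With CMA pages accounted only in ZONE_CMA, a normal
zone's free count never includes them, so the plain comparison gives the
same answer. The sketch below is purely illustrative userspace code; the
helper names and the numbers in main() are hypothetical, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define ALLOC_CMA 0x80  /* the flag this patch removes */

/*
 * old_watermark_ok(): mirrors the check before this patch.  CMA pages
 * are mixed into free_pages, so they must be subtracted when the
 * caller did not pass ALLOC_CMA.
 */
static bool old_watermark_ok(long free_pages, long free_cma_pages,
                             long min, long lowmem_reserve, int alloc_flags)
{
        long free_cma = 0;

        if (!(alloc_flags & ALLOC_CMA))
                free_cma = free_cma_pages;

        return free_pages - free_cma > min + lowmem_reserve;
}

/*
 * new_watermark_ok(): mirrors the simplified check.  A normal zone's
 * free_pages no longer contains CMA pages, so no correction is needed.
 */
static bool new_watermark_ok(long free_pages, long min, long lowmem_reserve)
{
        return free_pages > min + lowmem_reserve;
}

int main(void)
{
        /* hypothetical zone: 1000 free pages total, 300 of them in CMA areas */
        printf("old check (no ALLOC_CMA): %d\n",
               old_watermark_ok(1000, 300, 600, 50, 0));
        /* with ZONE_CMA the normal zone only reports the 700 non-CMA pages */
        printf("new check (ZONE_CMA):     %d\n",
               new_watermark_ok(700, 600, 50));
        /* both print 1: the explicit correction and the zone split agree */
        return 0;
}

With the two checks equivalent, the ALLOC_CMA plumbing in compaction and in
gfp_to_alloc_flags() has no remaining user, which is what the hunks below
delete.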
 mm/compaction.c |    4 ----
 mm/internal.h   |    3 +--
 mm/page_alloc.c |   16 ++--------------
 3 files changed, 3 insertions(+), 20 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index f9792ba..b79134e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1312,10 +1312,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
         if (!order || !may_enter_fs || !may_perform_io)
                 return COMPACT_SKIPPED;
 
-#ifdef CONFIG_CMA
-        if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-                alloc_flags |= ALLOC_CMA;
-#endif
         /* Compact each zone in the list */
         for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
                                                                 nodemask) {
diff --git a/mm/internal.h b/mm/internal.h
index a4f90ba..9968dff 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -407,7 +407,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HARDER    0x10 /* try to alloc harder */
 #define ALLOC_HIGH      0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET    0x40 /* check for correct cpuset */
-#define ALLOC_CMA       0x80 /* allow allocations from CMA areas */
-#define ALLOC_FAIR      0x100 /* fair zone allocation */
+#define ALLOC_FAIR      0x80 /* fair zone allocation */
 
 #endif /* __MM_INTERNAL_H */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f2844f0..551cc5b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1737,20 +1737,14 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
         /* free_pages my go negative - that's OK */
         long min = mark;
         int o;
-        long free_cma = 0;
 
         free_pages -= (1 << order) - 1;
         if (alloc_flags & ALLOC_HIGH)
                 min -= min / 2;
         if (alloc_flags & ALLOC_HARDER)
                 min -= min / 4;
-#ifdef CONFIG_CMA
-        /* If allocation can't use CMA areas don't use free CMA pages */
-        if (!(alloc_flags & ALLOC_CMA))
-                free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
-#endif
 
-        if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
+        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
                 return false;
         for (o = 0; o < order; o++) {
                 /* At the next order, this order's pages become unavailable */
@@ -2550,10 +2544,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
                                 unlikely(test_thread_flag(TIF_MEMDIE))))
                         alloc_flags |= ALLOC_NO_WATERMARKS;
         }
-#ifdef CONFIG_CMA
-        if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
-                alloc_flags |= ALLOC_CMA;
-#endif
+
         return alloc_flags;
 }
 
@@ -2837,9 +2828,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
         if (unlikely(!zonelist->_zonerefs->zone))
                 return NULL;
 
-        if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE)
-                alloc_flags |= ALLOC_CMA;
-
 retry_cpuset:
         cpuset_mems_cookie = read_mems_allowed_begin();
 
--
1.7.9.5

