Subject: [PATCH] mm: optimize find_zone_movable_pfns_for_nodes to avoid unnecessary loop
From: zhong jiang <zhongjiang@huawei.com>

When required_kernelcore drops to zero, exit the loop right away; scanning
the remaining nodes is just wasted time.

Signed-off-by: zhong jiang <zhongjiang@huawei.com>
---
mm/page_alloc.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)

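A minimal standalone sketch of the control-flow change, for illustration
only: the names and numbers below (node_cap, per_node, NR_NODES) are
hypothetical and nothing here is part of the patch. It shows the same
pattern of bailing out of the distribution loop once the remaining budget
reaches zero on a restart pass.

/* Illustrative sketch only; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4

int main(void)
{
	unsigned long required = 10;			/* pages still to place */
	unsigned long node_cap[NR_NODES] = { 3, 3, 1, 1 }; /* per-node capacity */
	unsigned long per_node;
	int usable_nodes = NR_NODES;
	bool avoid_loop = false;

restart:
	/* spread the remaining budget evenly over the usable nodes */
	per_node = required / usable_nodes;

	for (int nid = 0; nid < NR_NODES; nid++) {
		unsigned long take = node_cap[nid] < per_node ?
				     node_cap[nid] : per_node;

		required -= take;
		node_cap[nid] -= take;

		/* the optimisation: stop once nothing is left to place */
		if (!required && avoid_loop)
			goto out;
	}

	/* as in the kernel code: retry with fewer nodes while worthwhile */
	usable_nodes--;
	if (usable_nodes && required > usable_nodes) {
		avoid_loop = true;
		goto restart;
	}
out:
	printf("unplaced pages: %lu\n", required);
	return 0;
}
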
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ea759b9..be7df17 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6093,7 +6093,7 @@ static unsigned long __init early_calculate_totalpages(void)
unsigned long pages = end_pfn - start_pfn;

totalpages += pages;
- if (pages)
+ if (!node_isset(nid, node_states[N_MEMORY]) && pages)
node_set_state(nid, N_MEMORY);
}
return totalpages;
@@ -6115,6 +6115,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
unsigned long totalpages = early_calculate_totalpages();
int usable_nodes = nodes_weight(node_states[N_MEMORY]);
struct memblock_region *r;
+ bool avoid_loop = false;

/* Need to find movable_zone earlier when movable_node is specified. */
find_usable_zone_for_movable();
@@ -6275,6 +6276,8 @@ restart:
required_kernelcore -= min(required_kernelcore,
size_pages);
kernelcore_remaining -= size_pages;
+ if (!required_kernelcore && avoid_loop)
+ goto out2;
if (!kernelcore_remaining)
break;
}
@@ -6287,9 +6290,10 @@ restart:
* satisfied
*/
usable_nodes--;
- if (usable_nodes && required_kernelcore > usable_nodes)
+ if (usable_nodes && required_kernelcore > usable_nodes) {
+ avoid_loop = true;
goto restart;
-
+ }
out2:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid < MAX_NUMNODES; nid++)
--
1.8.3.1