From: Yinghai Lu <yinghai@kernel.org>
Subject: [PATCH 21/33] x86, lmb: Add x86 version of __lmb_find_area()
The generic version scans from high addresses down to low, and it seems
it cannot find a suitably compact area that way.

The x86 version scans from goal up to limit, just like the way we used
for early_res.

Use CONFIG_ARCH_LMB_FIND_AREA to select between them.

32-bit has to use CONFIG_ARCH_LMB_FIND_AREA=y, because some alloc_bootmem
callers in the nobootmem configuration hard-code -1ULL as the limit.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
---
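Note: below is a minimal user-space sketch contrasting the two scan
directions, for reviewers who want to see the difference concretely.
It is not the kernel code; the reserved[] table and every helper name
in it are made up for illustration.

/*
 * find_bottom_up() mimics the x86 behavior added by this patch: scan
 * from goal up toward limit, hopping over reserved ranges.
 * find_top_down() mimics the generic lmb behavior: scan from limit
 * down toward goal.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct range { uint64_t base, size; };

/* Pretend these ranges are already reserved. */
static const struct range reserved[] = {
	{ 0x1000, 0x1000 },		/* [0x1000, 0x2000) */
	{ 0x4000, 0x0800 },		/* [0x4000, 0x4800) */
};
#define NR_RESERVED (sizeof(reserved) / sizeof(reserved[0]))

static uint64_t round_up64(uint64_t x, uint64_t a)	/* a: power of two */
{
	return (x + a - 1) & ~(a - 1);
}

static uint64_t round_down64(uint64_t x, uint64_t a)
{
	return x & ~(a - 1);
}

/* Return 1 if [start, end) overlaps a reserved range; report its bounds. */
static int overlaps(uint64_t start, uint64_t end,
		    uint64_t *res_base, uint64_t *res_end)
{
	for (size_t i = 0; i < NR_RESERVED; i++) {
		uint64_t rb = reserved[i].base;
		uint64_t re = rb + reserved[i].size;

		if (end > rb && start < re) {
			*res_base = rb;
			*res_end = re;
			return 1;
		}
	}
	return 0;
}

/* x86 style: first fit from goal up toward limit. */
static uint64_t find_bottom_up(uint64_t goal, uint64_t limit,
			       uint64_t size, uint64_t align)
{
	uint64_t addr = round_up64(goal, align), rb, re;

	while (addr + size <= limit) {
		if (!overlaps(addr, addr + size, &rb, &re))
			return addr;
		addr = round_up64(re, align);	/* hop over the blocker */
	}
	return (uint64_t)-1;
}

/* generic style: first fit from limit down toward goal. */
static uint64_t find_top_down(uint64_t goal, uint64_t limit,
			      uint64_t size, uint64_t align)
{
	uint64_t addr = round_down64(limit - size, align), rb, re;

	while (addr >= goal && addr + size <= limit) {
		if (!overlaps(addr, addr + size, &rb, &re))
			return addr;
		if (rb < size)
			break;
		addr = round_down64(rb - size, align);	/* drop below it */
	}
	return (uint64_t)-1;
}

int main(void)
{
	/* Ask for 0x800 bytes in [0x0, 0x5000) with 0x1000 alignment. */
	printf("bottom-up: %#llx\n", (unsigned long long)
	       find_bottom_up(0x0, 0x5000, 0x800, 0x1000));
	printf("top-down:  %#llx\n", (unsigned long long)
	       find_top_down(0x0, 0x5000, 0x800, 0x1000));
	return 0;
}

Running this prints 0x0 for bottom-up and 0x3000 for top-down: the
bottom-up scan finds the compact slot below the first reservation,
which is the behavior this patch wants.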
 arch/x86/Kconfig  |    8 ++++++++
 arch/x86/mm/lmb.c |   57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 65 insertions(+), 0 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a80bce..0052d27 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -585,6 +585,14 @@ config PARAVIRT_DEBUG
 	  Enable to debug paravirt_ops internals.  Specifically, BUG if
 	  a paravirt_op is missing when it is called.
 
+config ARCH_LMB_FIND_AREA
+	default y
+	bool "Use x86's own lmb_find_area()"
+	---help---
+	  Use the x86 lmb_find_area() instead of the generic version.
+	  The x86 version gets a free area up from low addresses, while
+	  the generic one tries to get a free area down from the limit.
+
 config NO_BOOTMEM
 	default y
 	bool "Disable Bootmem code"
diff --git a/arch/x86/mm/lmb.c b/arch/x86/mm/lmb.c
index 3229e9e..d221672 100644
--- a/arch/x86/mm/lmb.c
+++ b/arch/x86/mm/lmb.c
@@ -86,3 +86,60 @@ u64 __init lmb_find_area_size(u64 start, u64 *sizep, u64 align)
 	return -1ULL;
 }
 
+#ifdef CONFIG_ARCH_LMB_FIND_AREA
+static int __init find_overlapped_early(u64 start, u64 end)
+{
+	int i;
+	struct lmb_property *r;
+
+	for (i = 0; i < lmb.reserved.cnt && lmb.reserved.region[i].size; i++) {
+		r = &lmb.reserved.region[i];
+		if (end > r->base && start < (r->base + r->size))
+			break;
+	}
+
+	return i;
+}
+
+/* Check for already reserved areas */
+static inline bool __init bad_addr(u64 *addrp, u64 size, u64 align)
+{
+	int i;
+	u64 addr = *addrp;
+	bool changed = false;
+	struct lmb_property *r;
+again:
+	i = find_overlapped_early(addr, addr + size);
+	r = &lmb.reserved.region[i];
+	if (i < lmb.reserved.cnt && r->size) {
+		*addrp = addr = round_up(r->base + r->size, align);
+		changed = true;
+		goto again;
+	}
+	return changed;
+}
+
+u64 __init __lmb_find_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
+			 u64 size, u64 align)
+{
+	u64 addr, last;
+
+	addr = round_up(ei_start, align);
+	if (addr < start)
+		addr = round_up(start, align);
+	if (addr >= ei_last)
+		goto out;
+	while (bad_addr(&addr, size, align) && addr + size <= ei_last)
+		;
+	last = addr + size;
+	if (last > ei_last)
+		goto out;
+	if (last > end)
+		goto out;
+
+	return addr;
+
+out:
+	return -1ULL;
+}
+#endif
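
For context, the expectation (an assumption on my part, following how
the early_res-based find_e820_area() code was driven) is that a wrapper
walks lmb.memory and tries each region in turn, first fit winning.
A rough sketch of that pattern, not part of this patch:

/* Illustrative only: drive __lmb_find_area() over each memory region. */
u64 __init lmb_find_area(u64 start, u64 end, u64 size, u64 align)
{
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		u64 ei_start = lmb.memory.region[i].base;
		u64 ei_last = ei_start + lmb.memory.region[i].size;
		u64 addr;

		addr = __lmb_find_area(ei_start, ei_last, start, end,
					size, align);
		if (addr != -1ULL)
			return addr;
	}
	return -1ULL;
}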
--
1.6.4.2

