Date: 8 Sep 2004
From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Subject: [RFC][PATCH] no bitmap buddy allocator : modifies expand() (2/4)
This is part (2/4) and modifies expand().

This patch removes the bitmap operations from alloc_pages().

Instead of using the MARK_USED() bitmap operation, this patch records
each page's order in the page struct itself, in the page->private field.
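For reference, the encoding relied on below could be captured by helpers
along these lines. This is only a sketch; the actual set_page_order()
and PG_private handling are presumably introduced in an earlier part of
this series, so the definitions here are illustrative rather than the
exact code:

/*
 * Illustrative sketch of the page->private order encoding used by
 * this series.  The leading page of a free block records its order
 * in page->private, and PG_private marks the field as valid.  All
 * callers hold the zone lock, so nonatomic flag ops are sufficient.
 */
static inline void set_page_order(struct page *page, int order)
{
	page->private = order;		/* remember this free block's order */
}

static inline unsigned long page_order(struct page *page)
{
	return page->private;		/* meaningful only while PagePrivate(page) */
}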

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>


---

test-kernel-kamezawa/mm/page_alloc.c | 17 +++++++----------
1 files changed, 7 insertions(+), 10 deletions(-)

diff -puN mm/page_alloc.c~eliminate-bitmap-alloc mm/page_alloc.c
--- test-kernel/mm/page_alloc.c~eliminate-bitmap-alloc 2004-09-08 18:48:40.667294640 +0900
+++ test-kernel-kamezawa/mm/page_alloc.c 2004-09-08 19:04:05.398714096 +0900
@@ -296,9 +296,6 @@ void __free_pages_ok(struct page *page,
free_pages_bulk(page_zone(page), 1, &list, order);
}

-#define MARK_USED(index, order, area) \
- __change_bit((index) >> (1+(order)), (area)->map)
-
/*
* The order of subdivision here is critical for the IO subsystem.
* Please do not alter this order without good reasons and regression
@@ -315,7 +312,7 @@ void __free_pages_ok(struct page *page,
*/
static inline struct page *
expand(struct zone *zone, struct page *page,
- unsigned long index, int low, int high, struct free_area *area)
+ int low, int high, struct free_area *area)
{
unsigned long size = 1 << high;

@@ -325,7 +322,9 @@ expand(struct zone *zone, struct page *p
size >>= 1;
BUG_ON(bad_range(zone, &page[size]));
list_add(&page[size].lru, &area->free_list);
- MARK_USED(index + size, high, area);
+ /* Note: already have lock, we don't need to use atomic ops */
+ set_page_order(&page[size], high);
+ SetPagePrivate(&page[size]);
}
return page;
}
@@ -379,7 +378,6 @@ static struct page *__rmqueue(struct zon
struct free_area * area;
unsigned int current_order;
struct page *page;
- unsigned int index;

for (current_order = order; current_order < MAX_ORDER; ++current_order) {
area = zone->free_area + current_order;
@@ -388,11 +386,10 @@ static struct page *__rmqueue(struct zon

page = list_entry(area->free_list.next, struct page, lru);
list_del(&page->lru);
- index = page - zone->zone_mem_map;
- if (current_order != MAX_ORDER-1)
- MARK_USED(index, current_order, area);
+ /* Note: already have lock, we don't need to use atomic ops */
+ ClearPagePrivate(page);
zone->free_pages -= 1UL << order;
- return expand(zone, page, index, order, current_order, area);
+ return expand(zone, page, order, current_order, area);
}

return NULL;
_



--

-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/
