Subject: Re: 2.1.76, nfs client, and memory fragmentation
Date: 28 Dec 1997
In article <199712282349.RAA00548@jadrek.kwr>,  <kwrohrer@enteract.com> wrote:
>And lo, Alan Cox saith unto me:
>> > However, permanently non-freeable blocks should be rare, temporarily locked
>> > blocks should be uncommon, and relocatable (= swappable) blocks should
>> > abound. And with a defragmenter available, especially one which can
>> No - alas not. Also for 32K chunks you only need 1 of the 8 pages to be
>> touched to be tied down.
>You're right; the locked blocks themselves are somewhat rare but the higher-
>order blocks they lock down multiplies that by up to the maximum order...
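(To spell out the arithmetic behind that: with 4kB pages, a 32kB chunk is an
order-3 block of 2^3 = 8 pages, so a single pinned page is enough to keep the
whole block from being coalesced back onto the higher-order free lists.)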

Note that if people want to test alternatives, here's an old one that I did a
long time ago and have just re-implemented for 2.1.76. I haven't even rebooted
this kernel to test that it works at all, but it used to work. I don't know
whether it is any better than what is there now, but it might well be.

What this does is essentially get rid of the old "minimum number of free
pages" policy, and instead make it a "minimum allocation order with an
entry" policy. The code will refuse to use up the last entry of an
allocation order for normal allocations - and the maximum allowable
order depends on the type of allocation.
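
To make that concrete, here is a rough standalone sketch (not part of the
patch) of the cap computed in __get_free_pages and enforced by the new
RMQUEUE check. The GFP constants and the NR_MEM_LISTS value below are only
stand-ins so the example compiles on its own; the real definitions come from
the kernel headers.

#include <stdio.h>

/* Stand-in constants for a self-contained example; the real definitions
 * live in the kernel headers.  NR_MEM_LISTS == 6 is an assumption made
 * purely for the sake of the numbers below. */
enum { GFP_BUFFER, GFP_ATOMIC, GFP_NFS };
#define NR_MEM_LISTS 6

/* Highest order whose free list this request is allowed to empty
 * completely; lists at or above this order must keep their last entry. */
static unsigned long max_order(int priority, unsigned long order)
{
	unsigned long maxorder = order + NR_MEM_LISTS/3;	/* normal: order + 2 */

	if (priority == GFP_ATOMIC)
		maxorder = NR_MEM_LISTS + NR_MEM_LISTS/3;	/* effectively unlimited */
	else if (priority == GFP_NFS)
		maxorder += NR_MEM_LISTS/3;			/* NFS: order + 4 */
	return maxorder;
}

int main(void)
{
	/* An order-0 request at normal priority may empty the order-0 and
	 * order-1 lists, but must leave the last block on order 2 and up;
	 * GFP_ATOMIC is never refused on these grounds. */
	printf("normal order-0 request: maxorder = %lu\n", max_order(GFP_BUFFER, 0));
	printf("atomic order-0 request: maxorder = %lu\n", max_order(GFP_ATOMIC, 0));
	return 0;
}

In the patch itself the priority switch lives in __get_free_pages and the
"last entry" test in RMQUEUE; the sketch just folds the two into one
function for readability.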

It might need some tweaks (and some bugfixes, for all I know), but it might
be worth testing out if you have problems with the current scheme or simply
don't like it.

Linus

----
--- v2.1.76/linux/mm/page_alloc.c Mon Jun 16 16:36:01 1997
+++ linux/mm/page_alloc.c Sun Dec 28 18:59:51 1997
@@ -161,11 +161,13 @@
change_bit((index) >> (1+(order)), (area)->map)
#define CAN_DMA(x) (PageDMA(x))
#define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
-#define RMQUEUE(order, dma) \
+#define RMQUEUE(order, maxorder, dma) \
do { struct free_area_struct * area = free_area+order; \
unsigned long new_order = order; \
- do { struct page *prev = memory_head(area), *ret; \
- while (memory_head(area) != (ret = prev->next)) { \
+ do { struct page *prev = memory_head(area), *ret = prev->next; \
+ while (memory_head(area) != ret) { \
+ if (new_order >= maxorder && ret->next == prev) \
+ break; \
if (!dma || CAN_DMA(ret)) { \
unsigned long map_nr = ret->map_nr; \
(prev->next = ret->next)->prev = prev; \
@@ -176,6 +178,7 @@
return ADDRESS(map_nr); \
} \
prev = ret; \
+ ret = ret->next; \
} \
new_order++; area++; \
} while (new_order < NR_MEM_LISTS); \
@@ -196,11 +199,23 @@

unsigned long __get_free_pages(int priority, unsigned long order, int dma)
{
- unsigned long flags;
- int reserved_pages;
+ unsigned long flags, maxorder;

if (order >= NR_MEM_LISTS)
- return 0;
+ goto nopage;
+
+ /*
+ * "maxorder" is the highest order number that we're allowed
+ * to empty in order to find a free page..
+ */
+ maxorder = order + NR_MEM_LISTS/3;
+ switch (priority) {
+ case GFP_ATOMIC:
+ maxorder = NR_MEM_LISTS;
+ /* fallthrough - no need to jump around */
+ case GFP_NFS:
+ maxorder += NR_MEM_LISTS/3;
+ }

if (in_interrupt() && priority != GFP_ATOMIC) {
static int count = 0;
@@ -211,19 +226,13 @@
}
}

- reserved_pages = 5;
- if (priority != GFP_NFS)
- reserved_pages = min_free_pages;
repeat:
spin_lock_irqsave(&page_alloc_lock, flags);
- if ((priority==GFP_ATOMIC) || nr_free_pages > reserved_pages) {
- RMQUEUE(order, dma);
- spin_unlock_irqrestore(&page_alloc_lock, flags);
- return 0;
- }
+ RMQUEUE(order, maxorder, dma);
spin_unlock_irqrestore(&page_alloc_lock, flags);
- if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
+ if (priority != GFP_BUFFER && priority != GFP_ATOMIC && try_to_free_page(priority, dma, 1))
goto repeat;
+nopage:
return 0;
}


