Subject: Re: [patch] my latest oom stuff

On Mon, 26 Oct 1998, Andrea Arcangeli wrote:

>>Could you try the above simple approach?
>
>Sure.

Seems to work fine here.

Here is the patch against your pre-2.1.127-1:

Index: linux/mm/page_alloc.c
diff -u linux/mm/page_alloc.c:1.1.1.2 linux/mm/page_alloc.c:1.1.1.1.18.4
--- linux/mm/page_alloc.c:1.1.1.2 Sat Oct 24 15:42:51 1998
+++ linux/mm/page_alloc.c Sat Oct 24 20:25:17 1998
@@ -237,45 +237,29 @@
unsigned long __get_free_pages(int gfp_mask, unsigned long order)
{
unsigned long flags;
+ int again = 0;
+ int wait = gfp_mask & __GFP_WAIT;

if (order >= NR_MEM_LISTS)
goto nopage;

- if (gfp_mask & __GFP_WAIT) {
- if (in_interrupt()) {
- static int count = 0;
- if (++count < 5) {
- printk("gfp called nonatomically from interrupt %p\n",
- __builtin_return_address(0));
- }
- goto nopage;
- }
-
- if (freepages.min > nr_free_pages) {
- int freed;
- freed = try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX);
- /*
- * Low priority (user) allocations must not
- * succeed if we didn't have enough memory
- * and we couldn't get more..
- */
- if (!freed && !(gfp_mask & (__GFP_MED | __GFP_HIGH)))
- goto nopage;
- }
+ if (wait && in_interrupt()) {
+ printk("gfp called nonatomically from interrupt %p\n",
+ __builtin_return_address(0));
+ goto nopage;
}
+ again:
spin_lock_irqsave(&page_alloc_lock, flags);
RMQUEUE(order, (gfp_mask & GFP_DMA));
spin_unlock_irqrestore(&page_alloc_lock, flags);
+
+ if (!again && wait)
+ {
+ again = 1;
+ if (try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX))
+ goto again;
+ }

- /*
- * If we failed to find anything, we'll return NULL, but we'll
- * wake up kswapd _now_ ad even wait for it synchronously if
- * we can.. This way we'll at least make some forward progress
- * over time.
- */
- wake_up(&kswapd_wait);
- if (gfp_mask & __GFP_WAIT)
- schedule();
nopage:
return 0;
}
Index: linux/mm/vmscan.c
diff -u linux/mm/vmscan.c:1.1.1.3 linux/mm/vmscan.c:1.1.1.2.4.14
--- linux/mm/vmscan.c:1.1.1.3 Sat Oct 24 15:42:52 1998
+++ linux/mm/vmscan.c Mon Oct 26 01:44:59 1998
@@ -29,11 +29,6 @@
#include <asm/pgtable.h>

/*
- * When are we next due for a page scan?
- */
-static unsigned long next_swap_jiffies = 0;
-
-/*
* How often do we do a pageout scan during normal conditions?
* Default is four times a second.
*/
@@ -42,7 +37,7 @@
/*
* The wait queue for waking up the pageout daemon:
*/
-struct wait_queue * kswapd_wait = NULL;
+struct task_struct * kswapd_task = NULL;

static void init_swap_timer(void);

@@ -447,39 +442,43 @@
static int do_try_to_free_page(int gfp_mask)
{
static int state = 0;
- int i=6;
- int stop;
+ int from_prio, to_prio;

/* Always trim SLAB caches when memory gets low. */
kmem_cache_reap(gfp_mask);

/* We try harder if we are waiting .. */
- stop = 3;
if (gfp_mask & __GFP_WAIT)
- stop = 0;
+ {
+ from_prio = 3;
+ to_prio = 0;
+ } else {
+ from_prio = 6;
+ to_prio = 3;
+ }

if (buffer_over_borrow() || pgcache_over_borrow())
- shrink_mmap(i, gfp_mask);
+ state = 0;

switch (state) {
do {
case 0:
- if (shrink_mmap(i, gfp_mask))
+ if (shrink_mmap(from_prio, gfp_mask))
return 1;
state = 1;
case 1:
- if (shm_swap(i, gfp_mask))
+ if (shm_swap(from_prio, gfp_mask))
return 1;
state = 2;
case 2:
- if (swap_out(i, gfp_mask))
+ if (swap_out(from_prio, gfp_mask))
return 1;
state = 3;
case 3:
- shrink_dcache_memory(i, gfp_mask);
+ shrink_dcache_memory(from_prio, gfp_mask);
state = 0;
- i--;
- } while ((i - stop) >= 0);
+ from_prio--;
+ } while (from_prio >= to_prio);
}
return 0;
}
@@ -509,8 +508,6 @@
*/
int kswapd(void *unused)
{
- struct wait_queue wait = { current, NULL };
-
current->session = 1;
current->pgrp = 1;
strcpy(current->comm, "kswapd");
@@ -523,12 +520,6 @@
*/
lock_kernel();

- /* Give kswapd a realtime priority. */
- current->policy = SCHED_FIFO;
- current->rt_priority = 32; /* Fixme --- we need to standardise our
- namings for POSIX.4 realtime scheduling
- priorities. */
-
/*
* Tell the memory management that we're a "memory allocator",
* and that if we need more memory we should get access to it
@@ -544,35 +535,24 @@
current->flags |= PF_MEMALLOC;

init_swap_timer();
- add_wait_queue(&kswapd_wait, &wait);
+ kswapd_task = current;
while (1) {
- int tries;
+ unsigned long start;

- current->state = TASK_INTERRUPTIBLE;
- flush_signals(current);
run_task_queue(&tq_disk);
+ flush_signals(current);
+ /*
+ * Remember to enable the swap tick before going to sleep.
+ */
+ timer_active |= 1<<SWAP_TIMER;
+ current->state = TASK_INTERRUPTIBLE;
schedule();
swapstats.wakeups++;

/*
- * Do the background pageout: be
- * more aggressive if we're really
- * low on free memory.
- *
- * We try page_daemon.tries_base times, divided by
- * an 'urgency factor'. In practice this will mean
- * a value of pager_daemon.tries_base / 8 or 4 = 64
- * or 128 pages at a time.
- * This gives us 64 (or 128) * 4k * 4 (times/sec) =
- * 1 (or 2) MB/s swapping bandwidth in low-priority
- * background paging. This number rises to 8 MB/s
- * when the priority is highest (but then we'll be
- * woken up more often and the rate will be even
- * higher).
+ * Do the pageout for at most one jiffy.
*/
- tries = pager_daemon.tries_base;
- tries >>= 4*free_memory_available();
-
+ start = jiffies;
do {
do_try_to_free_page(0);
/*
@@ -581,12 +561,13 @@
*/
if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster)
run_task_queue(&tq_disk);
- if (free_memory_available() > 1)
+ if (free_memory_available() == 2 && buffer_under_max()
+ && pgcache_under_max())
break;
- } while (--tries > 0);
+ } while (jiffies == start);
}
/* As if we could ever get here - maybe we want to make this killable */
- remove_wait_queue(&kswapd_wait, &wait);
+ kswapd_task = NULL;
unlock_kernel();
return 0;
}
@@ -598,22 +579,20 @@
*
* The "PF_MEMALLOC" flag protects us against recursion:
* if we need more memory as part of a swap-out effort we
- * will just silently return "success" to tell the page
- * allocator to accept the allocation.
+ * will just silently return "fail" to tell the page
+ * allocator that we are recursing.
*/
int try_to_free_pages(unsigned int gfp_mask, int count)
{
- int retval = 1;
+ int retval = 0;

lock_kernel();
if (!(current->flags & PF_MEMALLOC)) {
+ retval = 1;
current->flags |= PF_MEMALLOC;
- do {
- retval = do_try_to_free_page(gfp_mask);
- if (!retval)
+ while (count--)
+ if (!do_try_to_free_page(gfp_mask))
break;
- count--;
- } while (count > 0);
current->flags &= ~PF_MEMALLOC;
}
unlock_kernel();
@@ -625,37 +604,17 @@
*/
void swap_tick(void)
{
- unsigned long now, want;
- int want_wakeup = 0;
-
- want = next_swap_jiffies;
- now = jiffies;
-
/*
- * Examine the memory queues. Mark memory low
- * if there is nothing available in the three
- * highest queues.
- *
* Schedule for wakeup if there isn't lots
- * of free memory.
+ * of free memory or if there is too much
+ * of it used for buffers or pgcache.
*/
- switch (free_memory_available()) {
- case 0:
- want = now;
- /* Fall through */
- case 1:
- want_wakeup = 1;
- default:
- }
-
- if ((long) (now - want) >= 0) {
- if (want_wakeup || buffer_over_max() || pgcache_over_max()) {
- /* Set the next wake-up time */
- next_swap_jiffies = now + swapout_interval;
- kswapd_wakeup();
- }
- }
- timer_active |= (1<<SWAP_TIMER);
+
+ if (free_memory_available() < 2 || buffer_over_max() ||
+ pgcache_over_max())
+ kswapd_wakeup();
+ else
+ timer_active |= (1<<SWAP_TIMER);
}

/*
Index: linux/kernel/fork.c
diff -u linux/kernel/fork.c:1.1.1.2 linux/kernel/fork.c:1.1.1.2.4.2
--- linux/kernel/fork.c:1.1.1.2 Fri Oct 9 17:44:09 1998
+++ linux/kernel/fork.c Sun Oct 25 02:43:48 1998
@@ -296,6 +296,8 @@
exit_mmap(mm);
free_page_tables(mm);
kmem_cache_free(mm_cachep, mm);
+ if (free_memory_available() != 2)
+ kswapd_wakeup();
}
}

Index: linux/include/linux/mm.h
diff -u linux/include/linux/mm.h:1.1.1.2 linux/include/linux/mm.h:1.1.1.1.16.2
--- linux/include/linux/mm.h:1.1.1.2 Sat Oct 24 15:42:34 1998
+++ linux/include/linux/mm.h Sun Oct 25 02:43:49 1998
@@ -254,12 +254,6 @@

/* memory.c & swap.c*/

-/*
- * Decide if we should try to do some swapout..
- */
-extern int free_memory_available(void);
-extern struct wait_queue * kswapd_wait;
-
#define free_page(addr) free_pages((addr),0)
extern void FASTCALL(free_pages(unsigned long addr, unsigned long order));
extern void FASTCALL(__free_page(struct page *));
@@ -330,6 +324,19 @@

#define GFP_DMA __GFP_DMA

+/*
+ * Decide if we should try to do some swapout..
+ */
+extern int free_memory_available(void);
+extern struct task_struct * kswapd_task;
+
+static inline void kswapd_wakeup(void)
+{
+ struct task_struct *p = kswapd_task;
+ if (p)
+ wake_up_process(p);
+}
+
/* vma is the first one with address < vma->vm_end,
* and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
@@ -378,11 +385,6 @@
if (vma && end_addr <= vma->vm_start)
vma = NULL;
return vma;
-}
-
-extern __inline__ void kswapd_wakeup(void)
-{
- wake_up(&kswapd_wait);
}

#define buffer_under_min() ((buffermem >> PAGE_SHIFT) * 100 < \


Note also the change to try_to_free_pages(): in this patch, returning 1
means that we are not recursing (not that we have freed SWAP_CLUSTER_MAX
pages, as in the previous patches), so __get_free_pages() can make a
second attempt to allocate memory.
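
To make that concrete, this is the caller side in __get_free_pages() as
it looks after the patch (condensed from the diff above; the comment on
the return value is mine):

	if (!again && wait)
	{
		again = 1;
		/*
		 * try_to_free_pages() returns 1 when we are not
		 * recursing (PF_MEMALLOC was clear), not when it
		 * actually freed SWAP_CLUSTER_MAX pages.
		 */
		if (try_to_free_pages(gfp_mask, SWAP_CLUSTER_MAX))
			goto again;
	}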

Refusing the second try when one of the do_try_to_free_page() calls
failed (as the code did before) is worse, I think, than stopping kswapd
as I used to do in my previous patch (and a second try is very cheap
compared to a do_try_to_free_page() run at priority 0). I knew that, but
in my last patches I was worried about returning NULL too easily. Now
that kswapd is fixed it seems that we can do the right thing in
try_to_free_pages() without deadlocking.
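
For reference, the patched try_to_free_pages() reads roughly like this
(reassembled from the diff above; the comments are mine):

	int try_to_free_pages(unsigned int gfp_mask, int count)
	{
		int retval = 0;	/* 0 = recursing, caller must not retry */

		lock_kernel();
		if (!(current->flags & PF_MEMALLOC)) {
			retval = 1;	/* not recursing */
			current->flags |= PF_MEMALLOC;
			while (count--)
				if (!do_try_to_free_page(gfp_mask))
					break;
			current->flags &= ~PF_MEMALLOC;
		}
		unlock_kernel();
		return retval;
	}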
Theoretically we could also do:

	while (count--)
		do_try_to_free_page(gfp_mask);

and never break out of the loop, but I suspect that since we are doing
the pageout at very high priority (from 3 down to 0), the first failure
means that we are going to fail in the future too...

Note also the fix to kswapd, which now stops only when the buffer and
page cache are under their max.
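
Reassembling the diff, the inner loop of kswapd now looks roughly like
this (comments mine):

	start = jiffies;
	do {
		do_try_to_free_page(0);
		/*
		 * Sync the disk queue if enough async pages are
		 * pending, then stop only when free memory is
		 * plentiful _and_ both caches are under their max.
		 */
		if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster)
			run_task_queue(&tq_disk);
		if (free_memory_available() == 2 && buffer_under_max()
		    && pgcache_under_max())
			break;
	} while (jiffies == start);	/* work for at most one jiffy */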

Andrea Arcangeli


-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.rutgers.edu
Please read the FAQ at http://www.tux.org/lkml/
