Date: Mon, 13 Sep 1999
From: Andrea Arcangeli <andrea@suse.de>
Subject: [patch] inode dynamic allocation
I rewrote the inode memory management to make it dynamic. The inode
memory management is now very similar to the dcache memory management:
only valid/useful inodes live in memory. Inodes with i_count zero (i.e.
the inodes we are allowed to shrink) are queued on a separate list, so
at shrink time we only try to free cached inodes, never in-use ones.
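
To make the bookkeeping concrete, here is a minimal sketch of the list
movement (simplified from the __iget()/iput() changes in the patch
below; the inode_lock and the dirty-inode special case are omitted):

	/* Grabbing the first reference moves the inode back to the
	 * in-use list, so the shrinker never has to look at it. */
	static inline void __iget(struct inode * inode)
	{
		if (!inode->i_count++) {
			list_del(&inode->i_list);
			list_add(&inode->i_list, &inode_in_use);
			inodes_stat.nr_unused--;
		}
	}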

The icache is shrunk after the dcache in do_try_to_free_pages().
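
Each pass of do_try_to_free_pages() now ends up doing roughly the
following (a sketch of the reclaim step; the real code is in the
mm/vmscan.c hunk at the end of the patch). Both shrinkers return the
number of objects they freed, so we only pay for a slab reap when some
slab memory may actually have been released:

	progress = shrink_dcache_memory(priority, gfp_mask);
	progress |= shrink_icache_memory(priority, gfp_mask);
	if (progress)
		/* give the freed slab pages back to the page allocator */
		kmem_cache_reap(gfp_mask);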

The thing seems to work fine and I believe it's the right design. It
completely avoids any kind of DoS, since as soon as we are low on memory
the cache is automatically shrunk, and it allows us to cache a really
large number of inodes (so the dcache can safely grow a lot).

Here is the patch against 2.3.18ac2:

diff -urN 2.3.18ac2/fs/dcache.c 2.3.18ac2-inode/fs/dcache.c
--- 2.3.18ac2/fs/dcache.c Wed Sep 8 00:26:01 1999
+++ 2.3.18ac2-inode/fs/dcache.c Mon Sep 13 19:15:48 1999
@@ -284,8 +284,10 @@
* something (at which point we need to unuse
* all dentries).
*/
-void prune_dcache(int count)
+int prune_dcache(int goal)
{
+ int count = 0;
+
for (;;) {
struct dentry *dentry;
struct list_head *tmp = dentry_unused.prev;
@@ -298,10 +300,13 @@
dentry = list_entry(tmp, struct dentry, d_lru);
if (!dentry->d_count) {
prune_one_dentry(dentry);
- if (!--count)
+ count++;
+ if (!--goal)
break;
}
}
+
+ return count;
}

/*
@@ -470,16 +475,20 @@
* ...
* 6 - base-level: try to shrink a bit.
*/
-void shrink_dcache_memory(int priority, unsigned int gfp_mask)
+int shrink_dcache_memory(int priority, unsigned int gfp_mask)
{
+ int ret = 0;
+
if (gfp_mask & __GFP_IO) {
int count = 0;
lock_kernel();
if (priority)
count = dentry_stat.nr_unused / priority;
- prune_dcache(count);
+ ret = prune_dcache(count);
unlock_kernel();
}
+
+ return ret;
}

#define NAME_ALLOC_LEN(len) ((len+16) & ~15)
diff -urN 2.3.18ac2/fs/dquot.c 2.3.18ac2-inode/fs/dquot.c
--- 2.3.18ac2/fs/dquot.c Wed Sep 8 00:26:01 1999
+++ 2.3.18ac2-inode/fs/dquot.c Mon Sep 13 19:15:48 1999
@@ -520,7 +520,7 @@
if (count) {
printk(KERN_DEBUG "get_empty_dquot: pruning %d\n", count);
prune_dcache(count);
- free_inode_memory(count);
+ prune_icache(count);
goto repeat;
}

diff -urN 2.3.18ac2/fs/inode.c 2.3.18ac2-inode/fs/inode.c
--- 2.3.18ac2/fs/inode.c Wed Sep 8 00:26:02 1999
+++ 2.3.18ac2-inode/fs/inode.c Mon Sep 13 19:15:48 1999
@@ -10,6 +10,7 @@
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
+#include <linux/slab.h>

/*
* New inode.c implementation.
@@ -21,6 +22,8 @@
* Famous last words.
*/

+/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */
+
#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */

@@ -36,9 +39,9 @@
* Each inode can be on two separate lists. One is
* the hash list of the inode, used for lookups. The
* other linked list is the "type" list:
- * "in_use" - valid inode, hashed if i_nlink > 0
- * "dirty" - valid inode, hashed if i_nlink > 0, dirty.
- * "unused" - ready to be re-used. Not hashed.
+ * "in_use" - valid inode, i_count > 0, i_nlink > 0
+ * "dirty" - as "in_use" but also dirty
+ * "unused" - valid inode, i_count = 0
*
* A "dirty" list is maintained for each super block,
* allowing for low-overhead inode sync() operations.
@@ -61,11 +64,36 @@
*/
struct {
int nr_inodes;
- int nr_free_inodes;
+ int nr_unused;
int dummy[5];
} inodes_stat = {0, 0,};

-int max_inodes;
+static kmem_cache_t * inode_cachep;
+
+#define alloc_inode() \
+ ((struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL))
+#define destroy_inode(inode) kmem_cache_free(inode_cachep, (inode))
+
+/*
+ * These are initializations that only need to be done
+ * once, because the fields are idempotent across use
+ * of the inode, so let the slab be aware of that.
+ */
+static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
+{
+ struct inode * inode = (struct inode *) foo;
+
+ if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+ SLAB_CTOR_CONSTRUCTOR)
+ {
+ memset(inode, 0, sizeof(*inode));
+ init_waitqueue_head(&inode->i_wait);
+ INIT_LIST_HEAD(&inode->i_hash);
+ INIT_LIST_HEAD(&inode->i_dentry);
+ sema_init(&inode->i_sem, 1);
+ spin_lock_init(&inode->i_shared_lock);
+ }
+}

/*
* Put the inode on the super block's dirty list.
@@ -118,20 +146,6 @@
__wait_on_inode(inode);
}

-/*
- * These are initializations that only need to be done
- * once, because the fields are idempotent across use
- * of the inode..
- */
-static inline void init_once(struct inode * inode)
-{
- memset(inode, 0, sizeof(*inode));
- init_waitqueue_head(&inode->i_wait);
- INIT_LIST_HEAD(&inode->i_hash);
- INIT_LIST_HEAD(&inode->i_dentry);
- sema_init(&inode->i_sem, 1);
- spin_lock_init(&inode->i_shared_lock);
-}

static inline void write_inode(struct inode *inode)
{
@@ -147,7 +161,8 @@
spin_lock(&inode_lock);
} else {
list_del(&inode->i_list);
- list_add(&inode->i_list, &inode_in_use);
+ list_add(&inode->i_list,
+ inode->i_count ? &inode_in_use : &inode_unused);
/* Set I_LOCK, reset I_DIRTY */
inode->i_state ^= I_DIRTY | I_LOCK;
spin_unlock(&inode_lock);
@@ -233,6 +248,8 @@
{
if (inode->i_nrpages)
BUG();
+ if (!(inode->i_state & I_FREEING))
+ BUG();
wait_on_inode(inode);
if (IS_QUOTAINIT(inode))
DQUOT_DROP(inode);
@@ -243,35 +260,24 @@
}

/*
- * Dispose-list gets a local list, so it doesn't need to
- * worry about list corruption. It releases the inode lock
- * while clearing the inodes.
+ * Dispose-list gets a local list with local inodes in it, so it doesn't
+ * need to worry about list corruption and SMP locks.
*/
static void dispose_list(struct list_head * head)
{
- struct list_head *next;
- int count = 0;
+ struct list_head * inode_entry;
+ struct inode * inode;

- spin_unlock(&inode_lock);
- next = head->next;
- for (;;) {
- struct list_head * tmp = next;
- struct inode * inode;
+ while ((inode_entry = head->next) != head)
+ {
+ list_del(inode_entry);

- next = next->next;
- if (tmp == head)
- break;
- inode = list_entry(tmp, struct inode, i_list);
+ inode = list_entry(inode_entry, struct inode, i_list);
if (inode->i_nrpages)
truncate_inode_pages(inode, 0);
clear_inode(inode);
- count++;
+ destroy_inode(inode);
}
-
- /* Add them all to the unused list in one fell swoop */
- spin_lock(&inode_lock);
- list_splice(head, &inode_unused);
- inodes_stat.nr_free_inodes += count;
}

/*
@@ -280,7 +286,7 @@
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
struct list_head *next;
- int busy = 0;
+ int busy = 0, count = 0;

next = head->next;
for (;;) {
@@ -299,10 +305,13 @@
list_del(&inode->i_list);
list_add(&inode->i_list, dispose);
inode->i_state |= I_FREEING;
+ count++;
continue;
}
busy = 1;
}
+ /* only unused inodes may be cached with i_count zero */
+ inodes_stat.nr_unused -= count;
return busy;
}

@@ -320,10 +329,12 @@

spin_lock(&inode_lock);
busy = invalidate_list(&inode_in_use, sb, &throw_away);
+ busy |= invalidate_list(&inode_unused, sb, &throw_away);
busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
- dispose_list(&throw_away);
spin_unlock(&inode_lock);

+ dispose_list(&throw_away);
+
return busy;
}

@@ -339,155 +350,79 @@
* dispose_list.
*/
#define CAN_UNUSE(inode) \
- (((inode)->i_count | (inode)->i_state | (inode)->i_nrpages) == 0)
+ (((inode)->i_state | (inode)->i_nrpages) == 0)
#define INODE(entry) (list_entry(entry, struct inode, i_list))

-static int free_inodes(void)
+int prune_icache(int goal)
{
- struct list_head list, *entry, *freeable = &list;
- int found = 0;
+ LIST_HEAD(list);
+ struct list_head *entry, *freeable = &list;
+ int count = 0;
+ struct inode * inode;

- INIT_LIST_HEAD(freeable);
- entry = inode_in_use.next;
- while (entry != &inode_in_use) {
+ spin_lock(&inode_lock);
+ /* keep it simple and safe: sync everything before starting */
+ sync_all_inodes();
+
+ entry = inode_unused.prev;
+ while (entry != &inode_unused)
+ {
struct list_head *tmp = entry;

- entry = entry->next;
- if (!CAN_UNUSE(INODE(tmp)))
+ entry = entry->prev;
+ inode = INODE(tmp);
+ if (!CAN_UNUSE(inode))
continue;
+ if (inode->i_count)
+ BUG();
list_del(tmp);
- list_del(&INODE(tmp)->i_hash);
- INIT_LIST_HEAD(&INODE(tmp)->i_hash);
+ list_del(&inode->i_hash);
+ INIT_LIST_HEAD(&inode->i_hash);
list_add(tmp, freeable);
- list_entry(tmp, struct inode, i_list)->i_state = I_FREEING;
- found = 1;
+ inode->i_state |= I_FREEING;
+ count++;
+ if (!--goal)
+ break;
}
+ inodes_stat.nr_unused -= count;
+ spin_unlock(&inode_lock);

- if (found)
- dispose_list(freeable);
+ dispose_list(freeable);

- return found;
+ return count;
}

-/*
- * Searches the inodes list for freeable inodes,
- * shrinking the dcache before (and possible after,
- * if we're low)
- */
-static void try_to_free_inodes(int goal)
+int shrink_icache_memory(int priority, int gfp_mask)
{
- /*
- * First stry to just get rid of unused inodes.
- *
- * If we can't reach our goal that way, we'll have
- * to try to shrink the dcache and sync existing
- * inodes..
- */
- free_inodes();
- goal -= inodes_stat.nr_free_inodes;
- if (goal > 0) {
- spin_unlock(&inode_lock);
- select_dcache(goal, 0);
- prune_dcache(goal);
- spin_lock(&inode_lock);
- sync_all_inodes();
- free_inodes();
+ if (gfp_mask & __GFP_IO)
+ {
+ int count = 0;
+ if (priority)
+ count = inodes_stat.nr_unused / priority;
+ return prune_icache(count);
}
-}

-/*
- * This is the externally visible routine for
- * inode memory management.
- */
-void free_inode_memory(int goal)
-{
- spin_lock(&inode_lock);
- free_inodes();
- spin_unlock(&inode_lock);
+ return 0;
}

-
-/*
- * This is called with the spinlock held, but releases
- * the lock when freeing or allocating inodes.
- * Look out! This returns with the inode lock held if
- * it got an inode..
- *
- * We do inode allocations two pages at a time to reduce
- * fragmentation.
- */
-#define INODE_PAGE_ORDER 1
-#define INODE_ALLOCATION_SIZE (PAGE_SIZE << INODE_PAGE_ORDER)
-#define INODES_PER_ALLOCATION (INODE_ALLOCATION_SIZE/sizeof(struct inode))
-
-static struct inode * grow_inodes(void)
+static inline void __iget(struct inode * inode)
{
- struct inode * inode;
-
- /*
- * Check whether to restock the unused list.
- */
- if (inodes_stat.nr_inodes > max_inodes) {
- struct list_head *tmp;
- try_to_free_inodes(inodes_stat.nr_inodes >> 2);
- tmp = inode_unused.next;
- if (tmp != &inode_unused) {
- inodes_stat.nr_free_inodes--;
- list_del(tmp);
- inode = list_entry(tmp, struct inode, i_list);
- return inode;
- }
- }
-
- spin_unlock(&inode_lock);
- inode = (struct inode *)__get_free_pages(GFP_KERNEL,INODE_PAGE_ORDER);
- if (inode) {
- int size;
- struct inode * tmp;
-
- size = INODE_ALLOCATION_SIZE - 2*sizeof(struct inode);
- tmp = inode;
- spin_lock(&inode_lock);
- do {
- tmp++;
- init_once(tmp);
- list_add(&tmp->i_list, &inode_unused);
- size -= sizeof(struct inode);
- } while (size >= 0);
- init_once(inode);
- /*
- * Update the inode statistics
- */
- inodes_stat.nr_inodes += INODES_PER_ALLOCATION;
- inodes_stat.nr_free_inodes += INODES_PER_ALLOCATION - 1;
- return inode;
- }
-
- /*
- * If the allocation failed, do an extensive pruning of
- * the dcache and then try again to free some inodes.
- */
- prune_dcache(inodes_stat.nr_inodes >> 2);
-
- spin_lock(&inode_lock);
- free_inodes();
+ if (!inode->i_count++)
{
- struct list_head *tmp = inode_unused.next;
- if (tmp != &inode_unused) {
- inodes_stat.nr_free_inodes--;
- list_del(tmp);
- inode = list_entry(tmp, struct inode, i_list);
- return inode;
+ if (!(inode->i_state & I_DIRTY))
+ {
+ list_del(&inode->i_list);
+ list_add(&inode->i_list, &inode_in_use);
}
+ inodes_stat.nr_unused--;
}
- spin_unlock(&inode_lock);
-
- printk("grow_inodes: allocation failed\n");
- return NULL;
}

/*
* Called with the inode lock held.
+ * NOTE: we are not increasing the inode-refcount, you must call __iget()
+ * by hand after calling find_inode now! This simplifies iunique and won't
+ * add any additional branch in the common code.
*/
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head)
{
@@ -505,7 +440,6 @@
continue;
if (inode->i_ino != ino)
continue;
- inode->i_count++;
break;
}
return inode;
@@ -518,7 +452,7 @@
* i_sb, i_ino, i_count, i_state and the lists have
* been initialized elsewhere..
*/
-void clean_inode(struct inode *inode)
+static void clean_inode(struct inode *inode)
{
memset(&inode->u, 0, sizeof(inode->u));
inode->i_sock = 0;
@@ -528,7 +462,6 @@
inode->i_size = 0;
inode->i_generation = 0;
memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
- sema_init(&inode->i_sem, 1);
inode->i_pipe = NULL;
}

@@ -542,15 +475,11 @@
{
static unsigned long last_ino = 0;
struct inode * inode;
- struct list_head * tmp;

- spin_lock(&inode_lock);
- tmp = inode_unused.next;
- if (tmp != &inode_unused) {
- list_del(tmp);
- inodes_stat.nr_free_inodes--;
- inode = list_entry(tmp, struct inode, i_list);
-add_new_inode:
+ inode = alloc_inode();
+ if (inode)
+ {
+ spin_lock(&inode_lock);
list_add(&inode->i_list, &inode_in_use);
inode->i_sb = NULL;
inode->i_dev = 0;
@@ -560,22 +489,12 @@
inode->i_state = 0;
spin_unlock(&inode_lock);
clean_inode(inode);
- return inode;
}
-
- /*
- * Warning: if this succeeded, we will now
- * return with the inode lock.
- */
- inode = grow_inodes();
- if (inode)
- goto add_new_inode;
-
return inode;
}

/*
- * This is called with the inode lock held.. Be careful.
+ * This is called without the inode lock held.. Be careful.
*
* We no longer cache the sb_flags in i_flags - see fs.h
* -- rmk@arm.uk.linux.org
@@ -583,56 +502,47 @@
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head)
{
struct inode * inode;
- struct list_head * tmp = inode_unused.next;

- if (tmp != &inode_unused) {
- list_del(tmp);
- inodes_stat.nr_free_inodes--;
- inode = list_entry(tmp, struct inode, i_list);
-add_new_inode:
- list_add(&inode->i_list, &inode_in_use);
- list_add(&inode->i_hash, head);
- inode->i_sb = sb;
- inode->i_dev = sb->s_dev;
- inode->i_ino = ino;
- inode->i_flags = 0;
- inode->i_count = 1;
- inode->i_state = I_LOCK;
- spin_unlock(&inode_lock);
+ inode = alloc_inode();
+ if (inode) {
+ struct inode * old;

- clean_inode(inode);
- sb->s_op->read_inode(inode);
+ spin_lock(&inode_lock);
+ /* We released the lock, so.. */
+ old = find_inode(sb, ino, head);
+ if (!old)
+ {
+ list_add(&inode->i_list, &inode_in_use);
+ list_add(&inode->i_hash, head);
+ inode->i_sb = sb;
+ inode->i_dev = sb->s_dev;
+ inode->i_ino = ino;
+ inode->i_flags = 0;
+ inode->i_count = 1;
+ inode->i_state = I_LOCK;
+ spin_unlock(&inode_lock);

- /*
- * This is special! We do not need the spinlock
- * when clearing I_LOCK, because we're guaranteed
- * that nobody else tries to do anything about the
- * state of the inode when it is locked, as we
- * just created it (so there can be no old holders
- * that haven't tested I_LOCK).
- */
- inode->i_state &= ~I_LOCK;
- wake_up(&inode->i_wait);
+ clean_inode(inode);
+ sb->s_op->read_inode(inode);

- return inode;
- }
+ /*
+ * This is special! We do not need the spinlock
+ * when clearing I_LOCK, because we're guaranteed
+ * that nobody else tries to do anything about the
+ * state of the inode when it is locked, as we
+ * just created it (so there can be no old holders
+ * that haven't tested I_LOCK).
+ */
+ inode->i_state &= ~I_LOCK;
+ wake_up(&inode->i_wait);

- /*
- * We need to expand. Note that "grow_inodes()" will
- * release the spinlock, but will return with the lock
- * held again if the allocation succeeded.
- */
- inode = grow_inodes();
- if (inode) {
- /* We released the lock, so.. */
- struct inode * old = find_inode(sb, ino, head);
- if (!old)
- goto add_new_inode;
- list_add(&inode->i_list, &inode_unused);
- inodes_stat.nr_free_inodes++;
+ return inode;
+ }
+ __iget(inode);
spin_unlock(&inode_lock);
- wait_on_inode(old);
- return old;
+ destroy_inode(inode);
+ inode = old;
+ wait_on_inode(inode);
}
return inode;
}
@@ -660,7 +570,6 @@
spin_unlock(&inode_lock);
return res;
}
- inode->i_count--; /* compensate find_inode() */
} else {
counter = max_reserved + 1;
}
@@ -671,10 +580,10 @@
struct inode *igrab(struct inode *inode)
{
spin_lock(&inode_lock);
- if (inode->i_state & I_FREEING)
- inode = NULL;
+ if (!(inode->i_state & I_FREEING))
+ __iget(inode);
else
- inode->i_count++;
+ inode = NULL;
spin_unlock(&inode_lock);
if (inode)
wait_on_inode(inode);
@@ -689,14 +598,16 @@
spin_lock(&inode_lock);
inode = find_inode(sb, ino, head);
if (inode) {
+ __iget(inode);
spin_unlock(&inode_lock);
wait_on_inode(inode);
return inode;
}
+ spin_unlock(&inode_lock);
+
/*
- * get_new_inode() will do the right thing, releasing
- * the inode lock and re-trying the search in case it
- * had to block at any point.
+ * get_new_inode() will do the right thing, re-trying the search
+ * in case it had to block at any point.
*/
return get_new_inode(sb, ino, head);
}
@@ -721,6 +632,7 @@
{
if (inode) {
struct super_operations *op = NULL;
+ int destroy = 0;

if (inode->i_sb && inode->i_sb->s_op)
op = inode->i_sb->s_op;
@@ -750,13 +662,17 @@
inode->i_state|=I_FREEING;
spin_unlock(&inode_lock);
clear_inode(inode);
+ destroy = 1;
spin_lock(&inode_lock);
- list_add(&inode->i_list, &inode_unused);
- inodes_stat.nr_free_inodes++;
}
- else if (!(inode->i_state & I_DIRTY)) {
- list_del(&inode->i_list);
- list_add(&inode->i_list, &inode_in_use);
+ else
+ {
+ if (!(inode->i_state & I_DIRTY)) {
+ list_del(&inode->i_list);
+ list_add(&inode->i_list,
+ &inode_unused);
+ }
+ inodes_stat.nr_unused++;
}
#ifdef INODE_PARANOIA
if (inode->i_flock)
@@ -778,6 +694,8 @@
kdevname(inode->i_dev), inode->i_ino);
}
spin_unlock(&inode_lock);
+ if (destroy)
+ destroy_inode(inode);
}
}

@@ -795,14 +713,11 @@
}

/*
- * Initialize the hash tables and default
- * value for max inodes
+ * Initialize the hash tables.
*/
-#define MAX_INODE (16384)
-
void __init inode_init(void)
{
- int i, max;
+ int i;
struct list_head *head = inode_hashtable;

i = HASH_SIZE;
@@ -812,11 +727,12 @@
i--;
} while (i);

- /* Initial guess at reasonable inode number */
- max = num_physpages >> 1;
- if (max > MAX_INODE)
- max = MAX_INODE;
- max_inodes = max;
+ /* inode slab cache */
+ inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
+ 0, SLAB_HWCACHE_ALIGN, init_once,
+ NULL);
+ if (!inode_cachep)
+ panic("cannot create inode slab cache");
}

void update_atime (struct inode *inode)
diff -urN 2.3.18ac2/include/linux/dcache.h 2.3.18ac2-inode/include/linux/dcache.h
--- 2.3.18ac2/include/linux/dcache.h Tue Jul 13 02:01:39 1999
+++ 2.3.18ac2-inode/include/linux/dcache.h Mon Sep 13 19:15:48 1999
@@ -131,7 +131,7 @@

/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
-extern void prune_dcache(int);
+extern int prune_dcache(int);
extern void shrink_dcache_sb(struct super_block *);
extern void shrink_dcache_parent(struct dentry *);
extern int d_invalidate(struct dentry *);
@@ -140,9 +140,12 @@

/* dcache memory management */
extern int select_dcache(int, int);
-extern void shrink_dcache_memory(int, unsigned int);
+extern int shrink_dcache_memory(int, unsigned int);
extern void check_dcache_memory(void);
-extern void free_inode_memory(int); /* defined in fs/inode.c */
+
+/* icache memory management (defined in linux/fs/inode.c) */
+extern int shrink_icache_memory(int, int);
+extern int prune_icache(int);

/* only used at mount-time */
extern struct dentry * d_alloc_root(struct inode *);
diff -urN 2.3.18ac2/include/linux/fs.h 2.3.18ac2-inode/include/linux/fs.h
--- 2.3.18ac2/include/linux/fs.h Mon Sep 13 03:38:46 1999
+++ 2.3.18ac2-inode/include/linux/fs.h Mon Sep 13 19:15:48 1999
@@ -45,7 +45,6 @@
#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)

/* And dynamically-tunable limits and defaults: */
-extern int max_inodes;
extern int max_files, nr_files, nr_free_files;
extern int max_super_blocks, nr_super_blocks;

diff -urN 2.3.18ac2/kernel/sysctl.c 2.3.18ac2-inode/kernel/sysctl.c
--- 2.3.18ac2/kernel/sysctl.c Wed Sep 8 00:26:08 1999
+++ 2.3.18ac2-inode/kernel/sysctl.c Mon Sep 13 19:15:48 1999
@@ -256,8 +256,6 @@
0444, NULL, &proc_dointvec},
{FS_STATINODE, "inode-state", &inodes_stat, 7*sizeof(int),
0444, NULL, &proc_dointvec},
- {FS_MAXINODE, "inode-max", &max_inodes, sizeof(int),
- 0644, NULL, &proc_dointvec},
{FS_NRFILE, "file-nr", &nr_files, 3*sizeof(int),
0444, NULL, &proc_dointvec},
{FS_MAXFILE, "file-max", &max_files, sizeof(int),
diff -urN 2.3.18ac2/mm/vmscan.c 2.3.18ac2-inode/mm/vmscan.c
--- 2.3.18ac2/mm/vmscan.c Wed Sep 8 00:26:08 1999
+++ 2.3.18ac2-inode/mm/vmscan.c Mon Sep 13 19:18:27 1999
@@ -424,6 +424,19 @@
goto done;
}

+ /* don't be too light on the d/i cache, since
+ shrink_mmap() almost never fails when there's
+ really plenty of free memory, and swap_out()
+ rarely fails even if we are low on memory. */
+ {
+ int progress;
+
+ progress = shrink_dcache_memory(priority, gfp_mask);
+ progress |= shrink_icache_memory(priority, gfp_mask);
+ if (progress)
+ kmem_cache_reap(gfp_mask);
+ }
+
/* Try to get rid of some shared memory pages.. */
if (gfp_mask & __GFP_IO) {
while (shm_swap(priority, gfp_mask)) {
@@ -437,8 +450,6 @@
if (!--count)
goto done;
}
-
- shrink_dcache_memory(priority, gfp_mask);
} while (--priority >= 0);
done:

Andrea

