Date: 10 Nov 2005
From: Matthew Dobson <colpatch@us.ibm.com>
Subject: [PATCH 7/9] Cleanup cache_reap()
cache_reap() could also use a little love.  Here's a patch to clean it up.

-Matt
General readability fixes.

* Goto the end of the function instead of duplicating code when we
fail to grab cache_chain_sem (see the sketch after this list).
* Replace a list_for_each/list_entry combo with an identical but
more readable list_for_each_entry loop.
* Move the declarations of variables not referenced outside
certain loops into those loops.
* Store the result of numa_node_id() in a local variable.
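
For anyone less familiar with these idioms, here is a minimal sketch of
the two main patterns: bailing out through a single exit label when a
trylock fails, and the list_for_each/list_entry combo versus
list_for_each_entry.  The struct foo/foo_list/foo_sem names are made up
for illustration; this is not the slab.c code itself.

#include <linux/list.h>
#include <asm/semaphore.h>

struct foo {
	struct list_head next;
	int data;
};

static LIST_HEAD(foo_list);
static DECLARE_MUTEX(foo_sem);

/* Old style: untyped iterator plus duplicated bail-out code. */
static void walk_foo_old(void)
{
	struct list_head *walk;

	if (down_trylock(&foo_sem)) {
		/* duplicated "setup the next iteration" code here */
		return;
	}
	list_for_each(walk, &foo_list) {
		struct foo *f = list_entry(walk, struct foo, next);
		/* use f->data ... */
	}
	up(&foo_sem);
	/* ... and the same setup code again here */
}

/* New style: typed iterator, single exit path through a label. */
static void walk_foo_new(void)
{
	struct foo *f;

	if (down_trylock(&foo_sem))
		goto out;
	list_for_each_entry(f, &foo_list, next) {
		/* use f->data ... */
	}
	up(&foo_sem);
out:
	/* the "setup the next iteration" code appears exactly once */
	;
}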

Signed-off-by: Matthew Dobson <colpatch@us.ibm.com>

Index: linux-2.6.14+slab_cleanup/mm/slab.c
===================================================================
--- linux-2.6.14+slab_cleanup.orig/mm/slab.c 2005-11-10 11:48:47.540627688 -0800
+++ linux-2.6.14+slab_cleanup/mm/slab.c 2005-11-10 11:48:49.384347400 -0800
@@ -3302,45 +3302,32 @@ static void drain_array_locked(kmem_cach
*/
static void cache_reap(void *unused)
{
- struct list_head *walk;
- struct kmem_list3 *l3;
-
- if (down_trylock(&cache_chain_sem)) {
- /* Give up. Setup the next iteration. */
- schedule_delayed_work(&__get_cpu_var(reap_work),
- REAPTIMEOUT_CPUC + smp_processor_id());
- return;
- }
+ kmem_cache_t *searchp;

- list_for_each(walk, &cache_chain) {
- kmem_cache_t *searchp;
- struct list_head* p;
- int tofree;
- struct slab *slabp;
+ if (down_trylock(&cache_chain_sem))
+ goto out;

- searchp = list_entry(walk, kmem_cache_t, next);
+ list_for_each_entry(searchp, &cache_chain, next) {
+ struct kmem_list3 *l3;
+ int tofree, nid = numa_node_id();

if (searchp->flags & SLAB_NO_REAP)
goto next;

check_irq_on();
-
- l3 = searchp->nodelists[numa_node_id()];
+ l3 = searchp->nodelists[nid];
if (l3->alien)
drain_alien_cache(searchp, l3);
spin_lock_irq(&l3->list_lock);

- drain_array_locked(searchp, ac_data(searchp), 0,
- numa_node_id());
+ drain_array_locked(searchp, ac_data(searchp), 0, nid);

if (time_after(l3->next_reap, jiffies))
goto next_unlock;
-
l3->next_reap = jiffies + REAPTIMEOUT_LIST3;

if (l3->shared)
- drain_array_locked(searchp, l3->shared, 0,
- numa_node_id());
+ drain_array_locked(searchp, l3->shared, 0, nid);

if (l3->free_touched) {
l3->free_touched = 0;
@@ -3350,7 +3337,9 @@ static void cache_reap(void *unused)
tofree = 5 * searchp->num;
tofree = (l3->free_limit + tofree - 1) / tofree;
do {
- p = l3->slabs_free.next;
+ struct list_head *p = l3->slabs_free.next;
+ struct slab *slabp;
+
if (p == &(l3->slabs_free))
break;

@@ -3377,6 +3366,7 @@ next:
check_irq_on();
up(&cache_chain_sem);
drain_remote_pages();
+out:
/* Setup the next iteration */
schedule_delayed_work(&__get_cpu_var(reap_work),
REAPTIMEOUT_CPUC + smp_processor_id());