Date: 25 Mar 1998
Subject: performance patch for 2.1.90 fs/buffer.c
The attached patch should provide a small performance improvement for buffer.c
by avoiding repeated wake_up() calls and reducing the number of xchg()
operations.

In put_unused_buffer_head() I've removed the wake_up code and placed it in the
caller's context (where needed) so that it gets called only once. Since
put_unused_buffer_head() is called from loops over the buffers of a page,
there's no need to do a wake_up for every call.
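So the callers now do the wakeup themselves, once per batch; the no_grow hunk
below shows the pattern (sketched here for clarity):

	/* free a whole page's worth of buffer heads, then wake once */
	do {
		bh = head;
		head = head->b_this_page;
		put_unused_buffer_head(bh);
	} while (head);
	if (waitqueue_active(&buffer_wait))
		wake_up(&buffer_wait);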

For the case of recover_reusable_buffer_heads(), we don't need to do a wake_up
at all; instead the function returns a value indicating whether any buffer
heads were recovered. This allows a task to avoid doing a schedule() to wait
for buffer heads. (Tasks already sleeping for buffer heads will be awakened
when any are added to the reuse list, as noted below.)
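A task that needs buffer heads can then do the following (this is exactly the
pattern in the hunk below):

	add_wait_queue(&buffer_wait, &wait);
	current->state = TASK_UNINTERRUPTIBLE;
	if (!recover_reusable_buffer_heads())
		schedule();
	remove_wait_queue(&buffer_wait, &wait);

i.e. we only schedule() when the reuse list was actually empty.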

In free_async_buffers() I've reworked the loop to link the buffer heads
together first, and then splice the current reuse list onto them with a single
xchg() operation. On some architectures (or maybe all) the xchg op is
expensive, so there's no reason to do it more often than we need to.
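In other words, the new code walks the page's circular b_this_page list once
to build the b_next_free chain, then attaches the old reuse list at the tail
(a sketch of the hunk below):

	tail = bh;
	while ((tmp = tail->b_this_page) != bh) {
		tail->b_next_free = tmp;
		tail = tmp;
	}
	tail->b_next_free = xchg(&reuse_list, NULL);
	reuse_list = bh;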

The rewritten code also avoids an unlikely but subtle SMP problem: the
previous code dereferenced tmp after placing it on the reuse list. The cli()
exclusion ensures that only one caller can be in free_async_buffers(), but
another cpu could be running code in recover_reusable_buffer_heads(). If even
an icache miss delays us after the assignment to reuse_list, the other cpu
could reclaim the buffer head and clear it, leaving a null pointer for
b_this_page.
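To make the window concrete, here is the old loop with the race annotated
(the comments are mine):

	tmp = bh;
	do {
		tmp->b_next_free = xchg(&reuse_list, NULL);
		reuse_list = tmp;	/* tmp now visible to other cpus ... */
		tmp = tmp->b_this_page;	/* ... but we still read from it */
	} while (tmp != bh);

The new code never touches a buffer head after it has been made visible on
the reuse list.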

I've also added a call to wake up the buffer_wait queue in
free_async_buffers(), as we need to ensure that this is done whenever buffers
are added to the reuse list. This fix was added to the 2.0.xx tree some time
back, but 2.1.xx was still missing the wakeup in one place.
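The invariant is simply that every path which adds to the reuse (or unused)
list must end with:

	if (waitqueue_active(&buffer_wait))
		wake_up(&buffer_wait);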

Regards,
Bill

--- fs/buffer.c.old	Sun Mar 22 11:30:35 1998
+++ fs/buffer.c	Tue Mar 24 14:34:14 1998
@@ -1145,6 +1144,9 @@
 	return NULL;
 }
 
+/*
+ * Note: the caller should wake up the buffer_wait list if needed.
+ */
 static void put_unused_buffer_head(struct buffer_head * bh)
 {
 	if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
@@ -1157,9 +1159,6 @@
 	nr_unused_buffer_heads++;
 	bh->b_next_free = unused_list;
 	unused_list = bh;
-	if (!waitqueue_active(&buffer_wait))
-		return;
-	wake_up(&buffer_wait);
 }
 
 /*
@@ -1169,18 +1168,26 @@
  * fields after the final unlock. So, the device driver puts them on
  * the reuse_list instead once IO completes, and we recover these to
  * the unused_list here.
+ *
+ * Note that we don't do a wakeup here, but return a flag indicating
+ * whether we got any buffer heads. A task ready to sleep can check
+ * the returned value, and any tasks already sleeping will have been
+ * awakened when the buffer heads were added to the reuse list.
  */
-static inline void recover_reusable_buffer_heads(void)
+static inline int recover_reusable_buffer_heads(void)
 {
-	struct buffer_head *head;
-
-	head = xchg(&reuse_list, NULL);
+	struct buffer_head *head = xchg(&reuse_list, NULL);
+	int found = 0;
 
-	while (head) {
-		struct buffer_head *bh = head;
-		head = head->b_next_free;
-		put_unused_buffer_head(bh);
+	if (head) {
+		do {
+			struct buffer_head *bh = head;
+			head = head->b_next_free;
+			put_unused_buffer_head(bh);
+		} while (head);
+		found = 1;
 	}
+	return found;
 }
 
 /*
@@ -1279,11 +1286,16 @@
  * In case anything failed, we just free everything we got.
  */
 no_grow:
-	bh = head;
-	while (bh) {
-		head = bh;
-		bh = bh->b_this_page;
-		put_unused_buffer_head(head);
+	if (head) {
+		do {
+			bh = head;
+			head = head->b_this_page;
+			put_unused_buffer_head(bh);
+		} while (head);
+
+		/* Wake up any waiters ... */
+		if (waitqueue_active(&buffer_wait))
+			wake_up(&buffer_wait);
 	}
 
 /*
@@ -1309,8 +1321,8 @@
  */
 	add_wait_queue(&buffer_wait, &wait);
 	current->state = TASK_UNINTERRUPTIBLE;
-	recover_reusable_buffer_heads();
-	schedule();
+	if (!recover_reusable_buffer_heads())
+		schedule();
 	remove_wait_queue(&buffer_wait, &wait);
 	current->state = TASK_RUNNING;
 	goto try_again;
@@ -1337,14 +1349,25 @@
  */
 static inline void free_async_buffers (struct buffer_head * bh)
 {
-	struct buffer_head * tmp;
+	struct buffer_head *tmp, *tail;
 
-	tmp = bh;
-	do {
-		tmp->b_next_free = xchg(&reuse_list, NULL);
-		reuse_list = tmp;
-		tmp = tmp->b_this_page;
-	} while (tmp != bh);
+	/*
+	 * Link all the buffers into the b_next_free list,
+	 * so we only have to do one xchg() operation ...
+	 */
+	tail = bh;
+	while ((tmp = tail->b_this_page) != bh) {
+		tail->b_next_free = tmp;
+		tail = tmp;
+	}
+
+	/* Update the reuse list */
+	tail->b_next_free = xchg(&reuse_list, NULL);
+	reuse_list = bh;
+
+	/* Wake up any waiters ... */
+	if (waitqueue_active(&buffer_wait))
+		wake_up(&buffer_wait);
 }
 
 static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
@@ -1394,7 +1417,6 @@
 	clear_bit(PG_locked, &page->flags);
 	wake_up(&page->wait);
 	after_unlock_page(page);
-	wake_up(&buffer_wait);
 	return;
 
 still_busy:
@@ -1640,6 +1662,7 @@
 			return 0;
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
+
 	tmp = bh;
 	do {
 		p = tmp;
@@ -1653,6 +1676,10 @@
 		remove_from_queues(p);
 		put_unused_buffer_head(p);
 	} while (tmp != bh);
+	/* Wake up anyone waiting for buffer heads */
+	if (waitqueue_active(&buffer_wait))
+		wake_up(&buffer_wait);
+
 	buffermem -= PAGE_SIZE;
 	mem_map[MAP_NR(page)].buffers = NULL;
 	free_page(page);