Subject: Updated patch for get_unused_buffer_head

I've reworked my no-buffer-heads anti-deadlock patch to make it more
bullet-proof, and welcome further comments (and bullets :-))

The changes made were:
(1) Simplified the reserved buffer head accounting by keeping everything
on the unused list and just using the count of free buffer heads.

(2) Changed the reserve count to 2*MAX_BUF_PER_PAGE, ensuring at least
two pages of async IO on any architecture.

(3) Following Colin Plumb's suggestion, I moved the sleep for buffer
heads into create_buffers(), and any buffer heads already allocated are
released if the full page can't be completed. This ensures that tasks
waiting for buffer heads aren't tying up resources with partially
completed allocations.

(4) Used add_wait_queue()/schedule()/remove_wait_queue() instead of
sleep_on(). Adding the task to the buffer_wait queue and only then
checking for reusable buffer heads ensures that a wake_up from an
interrupt won't be missed (see the sketch after this list).
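
For reference, here is the core of that sleep sequence in isolation (a
simplified sketch using the same names as the patch below; the real code
also frees any partial allocation first and retries via the try_again
label):

	/*
	 * Sketch of the wakeup-safe sleep in create_buffers().  The task
	 * goes onto buffer_wait and is marked TASK_UNINTERRUPTIBLE
	 * _before_ the final check for reusable buffer heads, so a
	 * wake_up() from an interrupt between the check and schedule()
	 * just sets the task back to TASK_RUNNING.
	 */
	struct wait_queue wait = { current, NULL };

	run_task_queue(&tq_disk);		/* start any queued disk IO */
	add_wait_queue(&buffer_wait, &wait);	/* visible to wake_up() from here on */
	current->state = TASK_UNINTERRUPTIBLE;
	recover_reusable_buffer_heads();	/* may wake us via put_unused_buffer_head() */
	schedule();				/* doesn't block if the wake_up already happened */
	remove_wait_queue(&buffer_wait, &wait);
	current->state = TASK_RUNNING;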

One further improvement I'd like to add is to allocate non-async buffer
heads using SLAB_KERNEL instead of SLAB_ATOMIC, so that the allocation
can use swapping to free up memory. This should be safe now that async
and non-async requests are differentiated, but I'd like some comments on
this. (Hopefully Mark Hemment has time to take a look.) The code to do
this is commented out.
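
For concreteness, this is roughly how that path would read with the
#if 0 guard dropped (a sketch only; whether SLAB_KERNEL is actually safe
here is exactly what I'd like comments on):

	/*
	 * Sketch: the disabled branch of get_unused_buffer_head() as it
	 * would read if enabled.  A non-async caller may block in the
	 * allocator (SLAB_KERNEL), letting it swap pages out to satisfy
	 * the request; any IO that swapping generates allocates its
	 * buffer heads as async requests, which can fall back on the
	 * reserved pool rather than this path.
	 */
	if (!async &&
	    (bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
		memset(bh, 0, sizeof(*bh));
		nr_buffer_heads++;
		return bh;
	}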

Regards,
Bill

--- buffer.c.wsh Sat Jul 26 16:13:45 1997
+++ buffer.c.new Sun Jul 27 09:18:32 1997
@@ -49,8 +49,9 @@

#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
-#define MAX_UNUSED_BUFFERS 30 /* don't ever have more than this number of
- unused buffer heads */
+#define NR_RESERVED (2*MAX_BUF_PER_PAGE)
+#define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this
+ number of unused buffer heads */
#define HASH_PAGES 4 /* number of pages to use for the hash table */
#define HASH_PAGES_ORDER 2
#define NR_HASH (HASH_PAGES*PAGE_SIZE/sizeof(struct buffer_head *))
@@ -1062,34 +1063,11 @@
nr_unused_buffer_heads++;
bh->b_next_free = unused_list;
unused_list = bh;
+ if (!waitqueue_active(&buffer_wait))
+ return;
wake_up(&buffer_wait);
}

-static void get_more_buffer_heads(void)
-{
- struct buffer_head * bh;
-
- while (!unused_list) {
- /* This is critical. We can't swap out pages to get
- * more buffer heads, because the swap-out may need
- * more buffer-heads itself. Thus SLAB_ATOMIC.
- */
- if((bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC)) != NULL) {
- put_unused_buffer_head(bh);
- nr_buffer_heads++;
- return;
- }
-
- /* Uhhuh. We're _really_ low on memory. Now we just
- * wait for old buffer heads to become free due to
- * finishing IO..
- */
- run_task_queue(&tq_disk);
- sleep_on(&buffer_wait);
- }
-
-}
-
/*
* We can't put completed temporary IO buffer_heads directly onto the
* unused_list when they become unlocked, since the device driver
@@ -1111,18 +1089,61 @@
}
}

-static struct buffer_head * get_unused_buffer_head(void)
+/*
+ * WSH 07/24/97: Restructured to remove jumps from the common path and
+ * keep NR_RESERVED buffer heads for async requests.
+ */
+static struct buffer_head * get_unused_buffer_head(int async)
{
struct buffer_head * bh;

recover_reusable_buffer_heads();
- get_more_buffer_heads();
- if (!unused_list)
- return NULL;
- bh = unused_list;
- unused_list = bh->b_next_free;
- nr_unused_buffer_heads--;
- return bh;
+ if (nr_unused_buffer_heads > NR_RESERVED) {
+ bh = unused_list;
+ unused_list = bh->b_next_free;
+ nr_unused_buffer_heads--;
+ return bh;
+ }
+
+ /* This is critical. We can't swap out pages to get
+ * more buffer heads, because the swap-out may need
+ * more buffer-heads itself. Thus SLAB_ATOMIC.
+ */
+ if(
+#if 0
+nr_buffer_heads < 100 &&
+#endif
+ (bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC)) != NULL) {
+ memset(bh, 0, sizeof(*bh));
+ nr_buffer_heads++;
+ return bh;
+ }
+
+ /*
+ * If we need an async buffer, use the reserved buffer heads.
+ */
+ if (async && unused_list) {
+ bh = unused_list;
+ unused_list = bh->b_next_free;
+ nr_unused_buffer_heads--;
+ return bh;
+ }
+
+ /*
+ * Non-synchronous requests can use a different memory priority
+ * to free up pages. Any swapping thus generated will use async
+ * buffer heads.
+ */
+#if 0
+ if(!async &&
+ (bh = kmem_cache_alloc(bh_cachep, SLAB_KERNEL)) != NULL) {
+ memset(bh, 0, sizeof(*bh));
+ nr_buffer_heads++;
+ return bh;
+ }
+#endif
+
+ return NULL;
}

/*
@@ -1130,16 +1151,23 @@
* the size of each buffer.. Use the bh->b_this_page linked list to
* follow the buffers created. Return NULL if unable to create more
* buffers.
+ * WSH 07/26/97: Use async flag to differentiate async IO (paging) from
+ * ordinary buffer allocations. Only allow async requests to sleep
+ * waiting for buffer heads, and check for reusable buffer heads after
+ * adding task to the wait queue.
*/
-static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
+static struct buffer_head * create_buffers(unsigned long page,
+ unsigned long size, int async)
{
+ struct wait_queue wait = { current, NULL };
struct buffer_head *bh, *head;
long offset;

+try_again:
head = NULL;
offset = PAGE_SIZE;
while ((offset -= size) >= 0) {
- bh = get_unused_buffer_head();
+ bh = get_unused_buffer_head(async);
if (!bh)
goto no_grow;

@@ -1166,7 +1194,37 @@
bh = bh->b_this_page;
put_unused_buffer_head(head);
}
- return NULL;
+
+ /*
+ * Return failure for non-async IO requests. Async IO requests
+ * are not allowed to fail, so we have to wait until buffer heads
+ * become available. But we don't want tasks sleeping with
+ * partially complete buffers, so all were released above.
+ */
+ if (!async)
+ return NULL;
+
+ /* Uhhuh. We're _really_ low on memory. Now we just
+ * wait for old buffer heads to become free due to
+ * finishing IO. Since this is an async request and
+ * the reserve list is empty, we're sure there are
+ * async buffer heads in use.
+ */
+ run_task_queue(&tq_disk);
+ /*
+ * Set our state for sleeping, then check again for buffer heads.
+ */
+ add_wait_queue(&buffer_wait, &wait);
+ current->state = TASK_UNINTERRUPTIBLE;
+ recover_reusable_buffer_heads();
+#if 0
+if (current->state != TASK_UNINTERRUPTIBLE)
+printk("create_buffers: got wake up call\n");
+#endif
+ schedule();
+ remove_wait_queue(&buffer_wait, &wait);
+ current->state = TASK_RUNNING;
+ goto try_again;
}

/* Run the hooks that have to be done when a page I/O has completed. */
@@ -1242,12 +1300,13 @@
clear_bit(PG_uptodate, &page->flags);
clear_bit(PG_error, &page->flags);
/*
- * Allocate buffer heads pointing to this page, just for I/O.
+ * Allocate async buffer heads pointing to this page, just for I/O.
* They do _not_ show up in the buffer hash table!
* They are _not_ registered in page->buffers either!
*/
- bh = create_buffers(page_address(page), size);
+ bh = create_buffers(page_address(page), size, 1);
if (!bh) {
+ /* WSH: exit here leaves page->count incremented */
clear_bit(PG_locked, &page->flags);
wake_up(&page->wait);
return -ENOMEM;
@@ -1472,16 +1531,15 @@
return 0;
}

- isize = BUFSIZE_INDEX(size);
-
if (!(page = __get_free_page(pri)))
return 0;
- bh = create_buffers(page, size);
+ bh = create_buffers(page, size, 0);
if (!bh) {
free_page(page);
return 0;
}

+ isize = BUFSIZE_INDEX(size);
insert_point = free_list[isize];

tmp = bh;
@@ -1619,6 +1677,18 @@
SLAB_HWCACHE_ALIGN, NULL, NULL);
if(!bh_cachep)
panic("Cannot create buffer head SLAB cache\n");
+ /*
+ * Allocate the reserved buffer heads.
+ */
+ while (nr_buffer_heads < NR_RESERVED) {
+ struct buffer_head * bh;
+
+ bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
+ if (!bh)
+ break;
+ put_unused_buffer_head(bh);
+ nr_buffer_heads++;
+ }

lru_list[BUF_CLEAN] = 0;
grow_buffers(GFP_KERNEL, BLOCK_SIZE);