Subject: Re: Bad SMP race in getblk()
On Wed, 2 Aug 2000, Mark Hemment wrote:

> getblk() (fs/buffer.c) can now be called without the BKL being held.
> After searching the hash table (get_hash_table()), a free buffer-head
> can be claimed off the freelist and inserted into the hash table without
> re-checking the hash table (the hash-table lock is dropped between the
> original search and the new insert). This gives serious problems on SMP.
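
Schematically, the window Mark describes is the classic check-then-insert
race. The stand-alone user-space sketch below (hypothetical names -- bucket,
lookup(), racer() -- not the kernel code) usually ends up with the same key
hashed twice, for the same reason getblk() could hash the same block twice:

/*
 * Toy demo of the race: two threads look a key up in a shared list,
 * both miss, and both insert it, because the lock is dropped between
 * the search and the insert.  Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct node { int key; struct node *next; };

static struct node *bucket;		/* one hash bucket */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *lookup(int key)
{
	struct node *n;

	for (n = bucket; n; n = n->next)
		if (n->key == key)
			return n;
	return NULL;
}

static void *racer(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&lock);
	struct node *n = lookup(42);	/* search under the lock... */
	pthread_mutex_unlock(&lock);	/* ...then drop it: the race window */

	if (!n) {
		usleep(10000);		/* widen the window for the demo */
		n = malloc(sizeof(*n));
		n->key = 42;
		pthread_mutex_lock(&lock);	/* insert WITHOUT re-checking */
		n->next = bucket;
		bucket = n;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;
	int copies = 0;

	pthread_create(&t1, NULL, racer, NULL);
	pthread_create(&t2, NULL, racer, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	for (struct node *n = bucket; n; n = n->next)
		if (n->key == 42)
			copies++;
	printf("key 42 is in the bucket %d time(s)\n", copies);	/* usually 2 */
	return 0;
}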

You couldn't optimize it with the cookie, right? 8)

That race was well known and already fixed (I just confirmed that the patch
floating around for some months is correct and necessary). I guess it still
applies cleanly. Linus, I think it's a good idea to include it:

Index: kallsyms-kdb.2/fs/buffer.c
--- kallsyms-kdb.2/fs/buffer.c Sat, 03 Jun 2000 16:48:29 +0100 steved (linux/P/22_buffer.c 1.3.1.1.1.6 644)
+++ kallsyms-kdb.2(w)/fs/buffer.c Sat, 03 Jun 2000 18:36:06 +0100 steved (linux/P/22_buffer.c 1.3.1.1.1.6 644)
@@ -28,6 +28,8 @@
 
 /* async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de> */
 
+/* [6-May-2000, Steve Dodd] fix SMP race in getblk() */
+
 #include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -495,17 +497,6 @@ static void __remove_from_queues(struct
 	__remove_from_lru_list(bh, bh->b_list);
 }
 
-static void insert_into_queues(struct buffer_head *bh)
-{
-	struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
-
-	spin_lock(&lru_list_lock);
-	write_lock(&hash_table_lock);
-	__hash_link(bh, head);
-	__insert_into_lru_list(bh, bh->b_list);
-	write_unlock(&hash_table_lock);
-	spin_unlock(&lru_list_lock);
-}
 
 /* This function must only run if there are no other
  * references _anywhere_ to this buffer head.
@@ -530,19 +521,11 @@ static void put_last_free(struct buffer_
 	spin_unlock(&head->lock);
 }
 
-/*
- * Why like this, I hear you say... The reason is race-conditions.
- * As we don't lock buffers (unless we are reading them, that is),
- * something might happen to it while we sleep (ie a read-error
- * will force it bad). This shouldn't really happen currently, but
- * the code is ready.
- */
-struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
+static struct buffer_head * __get_hash_table(kdev_t dev, int block, int size,
+					     struct buffer_head **head)
 {
-	struct buffer_head **head = &hash(dev, block);
 	struct buffer_head *bh;
 
-	read_lock(&hash_table_lock);
 	for(bh = *head; bh; bh = bh->b_next)
 		if (bh->b_blocknr == block &&
 		    bh->b_size == size &&
@@ -550,11 +533,44 @@ struct buffer_head * get_hash_table(kdev
 			break;
 	if (bh)
 		atomic_inc(&bh->b_count);
+
+	return bh;
+}
+
+struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
+{
+	struct buffer_head **head = &hash(dev, block);
+	struct buffer_head *bh;
+
+	read_lock(&hash_table_lock);
+	bh = __get_hash_table(dev, block, size, head);
 	read_unlock(&hash_table_lock);
 
 	return bh;
 }
 
+static int insert_into_queues_unique(struct buffer_head *bh)
+{
+	struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
+	struct buffer_head *alias;
+	int err = 0;
+
+	spin_lock(&lru_list_lock);
+	write_lock(&hash_table_lock);
+
+	alias = __get_hash_table(bh->b_dev, bh->b_blocknr, bh->b_size, head);
+	if (!alias) {
+		__hash_link(bh, head);
+		__insert_into_lru_list(bh, bh->b_list);
+	} else
+		err = 1;
+
+	write_unlock(&hash_table_lock);
+	spin_unlock(&lru_list_lock);
+
+	return err;
+}
+
 unsigned int get_hardblocksize(kdev_t dev)
 {
 	/*
@@ -831,9 +847,8 @@ repeat:
 	spin_unlock(&free_list[isize].lock);
 
 	/*
-	 * OK, FINALLY we know that this buffer is the only one of
-	 * its kind, we hold a reference (b_count>0), it is unlocked,
-	 * and it is clean.
+	 * OK, we hold a reference (b_count>0) to the buffer,
+	 * it is unlocked, and it is clean.
 	 */
 	if (bh) {
 		init_buffer(bh, end_buffer_io_sync, NULL);
@@ -841,8 +856,16 @@ repeat:
 		bh->b_blocknr = block;
 		bh->b_state = 1 << BH_Mapped;
 
-		/* Insert the buffer into the regular lists */
-		insert_into_queues(bh);
+		/* Insert the buffer into the regular lists; check nobody
+		   else added it first */
+
+		if (!insert_into_queues_unique(bh))
+			goto out;
+
+		/* someone added it after we last checked the hash table */
+		put_last_free(bh);
+		goto repeat;
+
 out:
 		touch_buffer(bh);
 		return bh;
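
In the toy terms of the sketch above, the idiom the patch applies -- repeat
the lookup after taking the write lock, and only link the new element in if
it is still absent -- boils down to this (again hypothetical names, not the
kernel API):

/* Sketch of the insert-if-absent idiom insert_into_queues_unique() uses,
 * reusing the toy bucket/lookup from the demo above.  Returns 1 if an
 * alias already existed, in which case the caller must throw its own
 * node away and retry the lookup. */
static int insert_unique(struct node *n)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (!lookup(n->key)) {		/* re-check under the lock */
		n->next = bucket;
		bucket = n;
	} else
		err = 1;		/* somebody beat us to it */
	pthread_mutex_unlock(&lock);

	return err;
}

On the slow path where an alias won the race, getblk() gives its freshly
initialised buffer head back with put_last_free() and jumps to "repeat", so
the retried hash lookup returns the one buffer head hashed for that block.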
Andrea

PS. Since all filesystems run under the big kernel lock, that race couldn't
trigger unless you were using the block device directly.


