Subject: [PATCH 3.16 090/178] ext4: lock the xattr block before checksuming it

3.16.46-rc1 review patch.  If anyone has any objections, please let me know.

------------------

From: Theodore Ts'o <tytso@mit.edu>

commit dac7a4b4b1f664934e8b713f529b629f67db313c upstream.

We must lock the xattr block before calculating or verifying the
checksum in order to avoid spurious checksum failures.

https://bugzilla.kernel.org/show_bug.cgi?id=193661
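
To illustrate the race: if the checksum over the xattr block is computed or
verified while another task is modifying the block (and refreshing its stored
checksum) under the buffer lock, the data and the checksum can be observed out
of sync, and verification fails even though nothing is corrupted on disk.
Below is a minimal userspace sketch of that pattern, using plain pthreads and
a toy checksum rather than ext4's helpers; the file name, build line and all
identifiers are illustrative only and are not part of the patch.

/*
 * csum_race.c - toy demonstration, NOT ext4 code.
 *
 * A writer thread updates a "block" and its checksum under a lock; a
 * verifier that reads the block without taking the lock can observe the
 * data and the stored checksum out of sync and report a spurious
 * failure.  Taking the lock around the verification removes the race,
 * which is what the patch does with lock_buffer() for the xattr block.
 *
 * Build (illustrative): gcc -O2 -pthread csum_race.c -o csum_race
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE	64
#define ITERATIONS	1000000

struct block {
	pthread_mutex_t lock;
	unsigned char data[BLOCK_SIZE];
	uint32_t csum;
};

/* Toy checksum standing in for the crc32c that ext4 really uses. */
static uint32_t checksum(const unsigned char *p, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum = sum * 31 + *p++;
	return sum;
}

/* Writer: modify the block and refresh its checksum, both under the lock. */
static void *writer(void *arg)
{
	struct block *b = arg;

	for (int i = 0; i < ITERATIONS; i++) {
		pthread_mutex_lock(&b->lock);
		memset(b->data, i & 0xff, BLOCK_SIZE);
		b->csum = checksum(b->data, BLOCK_SIZE);
		pthread_mutex_unlock(&b->lock);
	}
	return NULL;
}

/* Verify the checksum, optionally taking the lock first. */
static int verify(struct block *b, int locked)
{
	int ok;

	if (locked)
		pthread_mutex_lock(&b->lock);
	/* Without the lock this read is deliberately racy. */
	ok = (checksum(b->data, BLOCK_SIZE) == b->csum);
	if (locked)
		pthread_mutex_unlock(&b->lock);
	return ok;
}

int main(void)
{
	struct block b = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t tid;
	long bad_unlocked = 0, bad_locked = 0;

	b.csum = checksum(b.data, BLOCK_SIZE);
	pthread_create(&tid, NULL, writer, &b);

	for (int i = 0; i < ITERATIONS; i++) {
		bad_unlocked += !verify(&b, 0);	/* may see spurious failures */
		bad_locked   += !verify(&b, 1);	/* never does */
	}
	pthread_join(tid, NULL);

	printf("spurious failures: unlocked=%ld locked=%ld\n",
	       bad_unlocked, bad_locked);
	return 0;
}

With the unlocked verify the mismatch count is typically non-zero; with the
lock taken first, as ext4_xattr_block_csum_verify() now does via
lock_buffer(), it stays at zero.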

Reported-by: Colin Ian King <colin.king@canonical.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
[bwh: Backported to 3.16: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
---
 fs/ext4/xattr.c | 65 +++++++++++++++++++++++++++------------------------------
 1 file changed, 31 insertions(+), 34 deletions(-)

--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -139,31 +139,26 @@ static __le32 ext4_xattr_block_csum(stru
 }
 
 static int ext4_xattr_block_csum_verify(struct inode *inode,
-					 sector_t block_nr,
-					 struct ext4_xattr_header *hdr)
+					 struct buffer_head *bh)
 {
-	if (ext4_has_metadata_csum(inode->i_sb) &&
-	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
-		return 0;
-	return 1;
-}
-
-static void ext4_xattr_block_csum_set(struct inode *inode,
-				      sector_t block_nr,
-				      struct ext4_xattr_header *hdr)
-{
-	if (!ext4_has_metadata_csum(inode->i_sb))
-		return;
+	struct ext4_xattr_header *hdr = BHDR(bh);
+	int ret = 1;
 
-	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
+	if (ext4_has_metadata_csum(inode->i_sb)) {
+		lock_buffer(bh);
+		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
+							bh->b_blocknr, hdr));
+		unlock_buffer(bh);
+	}
+	return ret;
 }
 
-static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
-						struct inode *inode,
-						struct buffer_head *bh)
+static void ext4_xattr_block_csum_set(struct inode *inode,
+				      struct buffer_head *bh)
 {
-	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
-	return ext4_handle_dirty_metadata(handle, inode, bh);
+	if (ext4_has_metadata_csum(inode->i_sb))
+		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
+						bh->b_blocknr, BHDR(bh));
 }
 
 static inline const struct xattr_handler *
@@ -224,7 +219,7 @@ ext4_xattr_check_block(struct inode *ino
 	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
 	    BHDR(bh)->h_blocks != cpu_to_le32(1))
 		return -EIO;
-	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
+	if (!ext4_xattr_block_csum_verify(inode, bh))
 		return -EIO;
 	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
 				       bh->b_data);
@@ -568,23 +563,22 @@ ext4_xattr_release_block(handle_t *handl
 		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
 		if (ce)
 			mb_cache_entry_release(ce);
+		ext4_xattr_block_csum_set(inode, bh);
 		/*
 		 * Beware of this ugliness: Releasing of xattr block references
 		 * from different inodes can race and so we have to protect
 		 * from a race where someone else frees the block (and releases
 		 * its journal_head) before we are done dirtying the buffer. In
 		 * nojournal mode this race is harmless and we actually cannot
-		 * call ext4_handle_dirty_xattr_block() with locked buffer as
+		 * call ext4_handle_dirty_metadata() with locked buffer as
 		 * that function can call sync_dirty_buffer() so for that case
 		 * we handle the dirtying after unlocking the buffer.
 		 */
 		if (ext4_handle_valid(handle))
-			error = ext4_handle_dirty_xattr_block(handle, inode,
-							      bh);
+			error = ext4_handle_dirty_metadata(handle, inode, bh);
 		unlock_buffer(bh);
 		if (!ext4_handle_valid(handle))
-			error = ext4_handle_dirty_xattr_block(handle, inode,
-							      bh);
+			error = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (IS_SYNC(inode))
 			ext4_handle_sync(handle);
 		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
@@ -812,13 +806,14 @@ ext4_xattr_block_set(handle_t *handle, s
 				ext4_xattr_cache_insert(ext4_mb_cache,
 					bs->bh);
 			}
+			ext4_xattr_block_csum_set(inode, bs->bh);
 			unlock_buffer(bs->bh);
 			if (error == -EIO)
 				goto bad_block;
 			if (!error)
-				error = ext4_handle_dirty_xattr_block(handle,
-								      inode,
-								      bs->bh);
+				error = ext4_handle_dirty_metadata(handle,
+								   inode,
+								   bs->bh);
 			if (error)
 				goto cleanup;
 			goto inserted;
@@ -887,10 +882,11 @@ inserted:
 			le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
 			ea_bdebug(new_bh, "reusing; refcount now=%d",
 				le32_to_cpu(BHDR(new_bh)->h_refcount));
+			ext4_xattr_block_csum_set(inode, new_bh);
 			unlock_buffer(new_bh);
-			error = ext4_handle_dirty_xattr_block(handle,
-							      inode,
-							      new_bh);
+			error = ext4_handle_dirty_metadata(handle,
+							   inode,
+							   new_bh);
 			if (error)
 				goto cleanup_dquot;
 		}
@@ -945,11 +941,12 @@ getblk_failed:
 				goto getblk_failed;
 			}
 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
+			ext4_xattr_block_csum_set(inode, new_bh);
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
 			ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
-			error = ext4_handle_dirty_xattr_block(handle,
-							      inode, new_bh);
+			error = ext4_handle_dirty_metadata(handle, inode,
+							   new_bh);
 			if (error)
 				goto cleanup;
 		}