Subject: [PATCH 5.18 1030/1095] btrfs: zoned: write out partially allocated region
Date: 2022-08-15

From: Naohiro Aota <naohiro.aota@wdc.com>

    [ Upstream commit 898793d992c23dac6126a6a94ad893eae1a2c9df ]

cow_file_range() works in an all-or-nothing way: if it fails to allocate an
extent for any part of the given region, it gives up the entire region,
including the parts that were allocated successfully. On top of
cow_file_range(), run_delalloc_zoned() writes out data only when the whole
region was allocated successfully.
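
For illustration, the pre-patch zoned path reduces to roughly this sketch
(simplified from the lines this patch removes; error handling and page
bookkeeping are elided):

	/* Old shape of run_delalloc_zoned(): one all-or-nothing call. */
	ret = cow_file_range(inode, locked_page, start, end, page_started,
			     nr_written, 0);
	if (ret)
		return ret;	/* a partial failure discards the whole region */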

This all-or-nothing allocation and write-out become problematic when the
available space in all the block groups gets tight under the active zone
restriction: btrfs_reserve_extent() tries hard to use up the space left in
the active block groups, but finally gives up and fails with -ENOSPC.
However, if we send the IOs for the successfully allocated region first, we
can finish a zone and then continue the rest of the allocation in a newly
allocated block group.

This patch implements partial write-out for run_delalloc_zoned(). With this
patch applied, cow_file_range() returns -EAGAIN to tell the caller to do
something that lets the allocation make further progress, and reports the
end of the successfully allocated region via done_offset. Correspondingly,
the zoned extent allocator now returns -EAGAIN to make cow_file_range()
bail out to its caller.
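
In caller terms, the new contract looks roughly like this (a simplified
sketch of the loop added below; the page_started early return and the
page-dirtying bookkeeping are omitted):

	u64 done_offset = end;

	while (start <= end) {
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started, nr_written, 0, &done_offset);
		if (ret && ret != -EAGAIN)
			return ret;		/* hard failure */
		if (ret == 0)
			done_offset = end;	/* whole remainder allocated */
		if (done_offset == start)
			return -ENOSPC;		/* no forward progress at all */
		/* Write out what did allocate so a zone can be finished. */
		extent_write_locked_range(&inode->vfs_inode, start, done_offset);
		start = done_offset + 1;	/* continue with the rest */
	}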

We still need to wait for an IO to complete before the allocation can
continue; the next patch implements that part.

    CC: stable@vger.kernel.org # 5.16+
    Fixes: afba2bc036b0 ("btrfs: zoned: implement active zone tracking")
    Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
    Signed-off-by: David Sterba <dsterba@suse.com>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
 fs/btrfs/extent-tree.c | 10 +++++++
 fs/btrfs/inode.c       | 63 ++++++++++++++++++++++++++++++++----------
 2 files changed, 59 insertions(+), 14 deletions(-)

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 56185541e188..eee68a6f2be7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4015,6 +4015,16 @@ static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info,
 	if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size)
 		return -ENOSPC;
 
+	/*
+	 * Even min_alloc_size is not left in any block groups. Since we cannot
+	 * activate a new block group, allocating it may not help. Let's tell a
+	 * caller to try again and hope it progress something by writing some
+	 * parts of the region. That is only possible for data block groups,
+	 * where a part of the region can be written.
+	 */
+	if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)
+		return -EAGAIN;
+
 	/*
 	 * We cannot activate a new block group and no enough space left in any
 	 * block groups. So, allocating a new block group may not help. But,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c50288d90c66..9753fc47e488 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -92,7 +92,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 static noinline int cow_file_range(struct btrfs_inode *inode,
 				   struct page *locked_page,
 				   u64 start, u64 end, int *page_started,
-				   unsigned long *nr_written, int unlock);
+				   unsigned long *nr_written, int unlock,
+				   u64 *done_offset);
 static struct extent_map *create_io_em(struct btrfs_inode *inode, u64 start,
 				       u64 len, u64 orig_start, u64 block_start,
 				       u64 block_len, u64 orig_block_len,
@@ -884,7 +885,7 @@ static int submit_uncompressed_range(struct btrfs_inode *inode,
 	 * can directly submit them without interruption.
 	 */
 	ret = cow_file_range(inode, locked_page, start, end, &page_started,
-			     &nr_written, 0);
+			     &nr_written, 0, NULL);
 	/* Inline extent inserted, page gets unlocked and everything is done */
 	if (page_started) {
 		ret = 0;
@@ -1133,7 +1134,8 @@ static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
 static noinline int cow_file_range(struct btrfs_inode *inode,
 				   struct page *locked_page,
 				   u64 start, u64 end, int *page_started,
-				   unsigned long *nr_written, int unlock)
+				   unsigned long *nr_written, int unlock,
+				   u64 *done_offset)
 {
 	struct btrfs_root *root = inode->root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1326,6 +1328,21 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
 out_unlock:
+	/*
+	 * If done_offset is non-NULL and ret == -EAGAIN, we expect the
+	 * caller to write out the successfully allocated region and retry.
+	 */
+	if (done_offset && ret == -EAGAIN) {
+		if (orig_start < start)
+			*done_offset = start - 1;
+		else
+			*done_offset = start;
+		return ret;
+	} else if (ret == -EAGAIN) {
+		/* Convert to -ENOSPC since the caller cannot retry. */
+		ret = -ENOSPC;
+	}
+
 	/*
 	 * Now, we have three regions to clean up:
 	 *
@@ -1571,19 +1588,37 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
 				       u64 end, int *page_started,
 				       unsigned long *nr_written)
 {
+	u64 done_offset = end;
 	int ret;
+	bool locked_page_done = false;
 
-	ret = cow_file_range(inode, locked_page, start, end, page_started,
-			     nr_written, 0);
-	if (ret)
-		return ret;
+	while (start <= end) {
+		ret = cow_file_range(inode, locked_page, start, end, page_started,
+				     nr_written, 0, &done_offset);
+		if (ret && ret != -EAGAIN)
+			return ret;
 
-	if (*page_started)
-		return 0;
+		if (*page_started) {
+			ASSERT(ret == 0);
+			return 0;
+		}
+
+		if (ret == 0)
+			done_offset = end;
+
+		if (done_offset == start)
+			return -ENOSPC;
+
+		if (!locked_page_done) {
+			__set_page_dirty_nobuffers(locked_page);
+			account_page_redirty(locked_page);
+		}
+		locked_page_done = true;
+		extent_write_locked_range(&inode->vfs_inode, start, done_offset);
+
+		start = done_offset + 1;
+	}
 
-	__set_page_dirty_nobuffers(locked_page);
-	account_page_redirty(locked_page);
-	extent_write_locked_range(&inode->vfs_inode, start, end);
 	*page_started = 1;
 
 	return 0;
@@ -1675,7 +1710,7 @@ static int fallback_to_cow(struct btrfs_inode *inode, struct page *locked_page,
 	}
 
 	return cow_file_range(inode, locked_page, start, end, page_started,
-			      nr_written, 1);
+			      nr_written, 1, NULL);
 }
 
 /*
@@ -2086,7 +2121,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
 						 page_started, nr_written);
 	else
 		ret = cow_file_range(inode, locked_page, start, end,
-				     page_started, nr_written, 1);
+				     page_started, nr_written, 1, NULL);
 	} else {
 		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);
 		ret = cow_file_range_async(inode, wbc, locked_page, start, end,
    --
    2.35.1

