btrfs: add a cached state to extent_clear_unlock_delalloc

Now that lock_extent() is tightly coupled with extent_clear_unlock_delalloc()
we can add a cached state to extent_clear_unlock_delalloc() and benefit from
skipping the extra extent state lookup when we're doing COW.

Reviewed-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Author:    Josef Bacik <josef@toxicpanda.com>
Date:      2024-04-03 17:29:40 -04:00
Committer: David Sterba <dsterba@suse.com>
Parent:    8325f41a56
Commit:    6b0a63a4fa

3 files changed, 28 insertions(+), 19 deletions(-)
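The change is mechanical: callers that already hold a cached extent state from
lock_extent() now hand it to extent_clear_unlock_delalloc(), which forwards it
to clear_extent_bit() so the io_tree does not have to be searched a second
time. The standalone userspace sketch below only models that idea with
hypothetical names (find_state(), lock_range(), clear_unlock_range()); it is
not btrfs code, just an illustration of why passing the cached node back in
skips the second lookup.

/*
 * Minimal userspace model of the cached-state idea (NOT btrfs code):
 * the lock step remembers the state it found, and the clear step
 * reuses that pointer instead of searching again.
 */
#include <stdio.h>

struct state {
	unsigned long start;
	unsigned long end;
	int locked;
	struct state *next;
};

struct tree {
	struct state *head;
};

static int lookups;	/* counts how often we had to search */

/* Linear search stands in for the rbtree walk that caching avoids. */
static struct state *find_state(struct tree *t, unsigned long start)
{
	struct state *s;

	lookups++;
	for (s = t->head; s; s = s->next)
		if (s->start <= start && start <= s->end)
			return s;
	return NULL;
}

/* Like lock_extent(..., &cached): lock the range and remember the state. */
static void lock_range(struct tree *t, unsigned long start, struct state **cached)
{
	struct state *s = find_state(t, start);

	if (s)
		s->locked = 1;
	if (cached)
		*cached = s;
}

/* Like extent_clear_unlock_delalloc(..., &cached): reuse the cached state. */
static void clear_unlock_range(struct tree *t, unsigned long start,
			       struct state **cached)
{
	struct state *s = (cached && *cached) ? *cached : find_state(t, start);

	if (s)
		s->locked = 0;
	if (cached)
		*cached = NULL;
}

int main(void)
{
	struct state node = { .start = 0, .end = 4095, .locked = 0, .next = NULL };
	struct tree t = { .head = &node };
	struct state *cached = NULL;

	lock_range(&t, 0, &cached);		/* one lookup here ... */
	clear_unlock_range(&t, 0, &cached);	/* ... none here */
	printf("locked=%d lookups=%d\n", node.locked, lookups);	/* locked=0 lookups=1 */
	return 0;
}

In the real code the cached state is reference-counted, which is also why the
explicit free_extent_state() calls can be dropped from the callers in the diff
below: clear_extent_bit() releases the cached reference once it has used it.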

fs/btrfs/extent_io.c

@@ -412,9 +412,10 @@ noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
 
 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 				  struct page *locked_page,
+				  struct extent_state **cached,
 				  u32 clear_bits, unsigned long page_ops)
 {
-	clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
+	clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
 
 	__process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
 			       start, end, page_ops);

fs/btrfs/extent_io.h

@@ -27,6 +27,7 @@ struct address_space;
 struct writeback_control;
 struct extent_io_tree;
 struct extent_map_tree;
+struct extent_state;
 struct btrfs_block_group;
 struct btrfs_fs_info;
 struct btrfs_inode;
@@ -352,6 +353,7 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb);
 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
 				  struct page *locked_page,
+				  struct extent_state **cached,
 				  u32 bits_to_clear, unsigned long page_ops);
 int extent_invalidate_folio(struct extent_io_tree *tree,
 			  struct folio *folio, size_t offset);

fs/btrfs/inode.c

@@ -762,8 +762,8 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
 		return ret;
 	}
 
-	free_extent_state(cached);
-	extent_clear_unlock_delalloc(inode, offset, end, NULL, clear_flags,
+	extent_clear_unlock_delalloc(inode, offset, end, NULL, &cached,
+				     clear_flags,
 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
 				     PAGE_END_WRITEBACK);
 	return ret;
@@ -1154,6 +1154,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
 	struct btrfs_ordered_extent *ordered;
 	struct btrfs_key ins;
 	struct page *locked_page = NULL;
+	struct extent_state *cached = NULL;
 	struct extent_map *em;
 	int ret = 0;
 	u64 start = async_extent->start;
@@ -1194,7 +1195,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
 		goto done;
 	}
 
-	lock_extent(io_tree, start, end, NULL);
+	lock_extent(io_tree, start, end, &cached);
 
 	/* Here we're doing allocation and writeback of the compressed pages */
 	em = create_io_em(inode, start,
@@ -1229,7 +1230,7 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
 
 	/* Clear dirty, set writeback and unlock the pages. */
 	extent_clear_unlock_delalloc(inode, start, end,
-			NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
+			NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
 			PAGE_UNLOCK | PAGE_START_WRITEBACK);
 	btrfs_submit_compressed_write(ordered,
 			async_extent->folios,	/* compressed_folios */
@@ -1247,7 +1248,8 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
 	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
 	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
 	extent_clear_unlock_delalloc(inode, start, end,
-				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
+				     NULL, &cached,
+				     EXTENT_LOCKED | EXTENT_DELALLOC |
 				     EXTENT_DELALLOC_NEW |
 				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
 				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
@@ -1329,6 +1331,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 {
 	struct btrfs_root *root = inode->root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct extent_state *cached = NULL;
 	u64 alloc_hint = 0;
 	u64 orig_start = start;
 	u64 num_bytes;
@@ -1429,7 +1432,8 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 
 		ram_size = ins.offset;
 
-		lock_extent(&inode->io_tree, start, start + ram_size - 1, NULL);
+		lock_extent(&inode->io_tree, start, start + ram_size - 1,
+			    &cached);
 
 		em = create_io_em(inode, start, ins.offset, /* len */
 				  start, /* orig_start */
@@ -1441,7 +1445,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 				   BTRFS_ORDERED_REGULAR /* type */);
 		if (IS_ERR(em)) {
 			unlock_extent(&inode->io_tree, start,
-				      start + ram_size - 1, NULL);
+				      start + ram_size - 1, &cached);
 			ret = PTR_ERR(em);
 			goto out_reserve;
 		}
@@ -1453,7 +1457,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 					     BTRFS_COMPRESS_NONE);
 		if (IS_ERR(ordered)) {
 			unlock_extent(&inode->io_tree, start,
-				      start + ram_size - 1, NULL);
+				      start + ram_size - 1, &cached);
 			ret = PTR_ERR(ordered);
 			goto out_drop_extent_cache;
 		}
@@ -1493,7 +1497,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 			page_ops |= PAGE_SET_ORDERED;
 
 		extent_clear_unlock_delalloc(inode, start, start + ram_size - 1,
-					     locked_page,
+					     locked_page, &cached,
 					     EXTENT_LOCKED | EXTENT_DELALLOC,
 					     page_ops);
 		if (num_bytes < cur_alloc_size)
@@ -1552,7 +1556,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 		if (!locked_page)
 			mapping_set_error(inode->vfs_inode.i_mapping, ret);
 		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
-					     locked_page, 0, page_ops);
+					     locked_page, NULL, 0, page_ops);
 	}
 
 	/*
@@ -1575,7 +1579,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 	if (extent_reserved) {
 		extent_clear_unlock_delalloc(inode, start,
 					     start + cur_alloc_size - 1,
-					     locked_page,
+					     locked_page, &cached,
 					     clear_bits,
 					     page_ops);
 		start += cur_alloc_size;
@@ -1590,7 +1594,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
 	if (start < end) {
 		clear_bits |= EXTENT_CLEAR_DATA_RESV;
 		extent_clear_unlock_delalloc(inode, start, end, locked_page,
-					     clear_bits, page_ops);
+					     &cached, clear_bits, page_ops);
 	}
 	return ret;
 }
@@ -2206,11 +2210,10 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
 			btrfs_put_ordered_extent(ordered);
 
 		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
-					     locked_page, EXTENT_LOCKED |
-					     EXTENT_DELALLOC |
+					     locked_page, &cached_state,
+					     EXTENT_LOCKED | EXTENT_DELALLOC |
 					     EXTENT_CLEAR_DATA_RESV,
 					     PAGE_UNLOCK | PAGE_SET_ORDERED);
-		free_extent_state(cached_state);
 
 		cur_offset = extent_end;
@@ -2252,10 +2255,13 @@ static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
 	 * we're not locked at this point.
 	 */
 	if (cur_offset < end) {
-		lock_extent(&inode->io_tree, cur_offset, end, NULL);
+		struct extent_state *cached = NULL;
+
+		lock_extent(&inode->io_tree, cur_offset, end, &cached);
 		extent_clear_unlock_delalloc(inode, cur_offset, end,
-					     locked_page, EXTENT_LOCKED |
-					     EXTENT_DELALLOC | EXTENT_DEFRAG |
+					     locked_page, &cached,
+					     EXTENT_LOCKED | EXTENT_DELALLOC |
+					     EXTENT_DEFRAG |
 					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
 					     PAGE_START_WRITEBACK |
 					     PAGE_END_WRITEBACK);