mirror of
https://github.com/torvalds/linux
synced 2024-11-02 18:48:59 +00:00
btrfs: convert extent_write_cache_pages() to use filemap_get_folios_tag()
Convert function to use folios throughout. This is in preparation for the removal of find_get_pages_range_tag(). Now also supports large folios. Link: https://lkml.kernel.org/r/20230104211448.4804-8-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com> Acked-by: David Sterba <dsterba@suse.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
51c5cd3baf
commit
9f50fd2e92
1 changed file with 19 additions and 19 deletions
|
@ -2993,8 +2993,8 @@ static int extent_write_cache_pages(struct address_space *mapping,
|
|||
int ret = 0;
|
||||
int done = 0;
|
||||
int nr_to_write_done = 0;
|
||||
struct pagevec pvec;
|
||||
int nr_pages;
|
||||
struct folio_batch fbatch;
|
||||
unsigned int nr_folios;
|
||||
pgoff_t index;
|
||||
pgoff_t end; /* Inclusive */
|
||||
pgoff_t done_index;
|
||||
|
@ -3014,7 +3014,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
|
|||
if (!igrab(inode))
|
||||
return 0;
|
||||
|
||||
pagevec_init(&pvec);
|
||||
folio_batch_init(&fbatch);
|
||||
if (wbc->range_cyclic) {
|
||||
index = mapping->writeback_index; /* Start from prev offset */
|
||||
end = -1;
|
||||
|
@ -3052,14 +3052,14 @@ static int extent_write_cache_pages(struct address_space *mapping,
|
|||
tag_pages_for_writeback(mapping, index, end);
|
||||
done_index = index;
|
||||
while (!done && !nr_to_write_done && (index <= end) &&
|
||||
(nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
|
||||
&index, end, tag))) {
|
||||
(nr_folios = filemap_get_folios_tag(mapping, &index,
|
||||
end, tag, &fbatch))) {
|
||||
unsigned i;
|
||||
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
struct page *page = pvec.pages[i];
|
||||
for (i = 0; i < nr_folios; i++) {
|
||||
struct folio *folio = fbatch.folios[i];
|
||||
|
||||
done_index = page->index + 1;
|
||||
done_index = folio->index + folio_nr_pages(folio);
|
||||
/*
|
||||
* At this point we hold neither the i_pages lock nor
|
||||
* the page lock: the page may be truncated or
|
||||
|
@ -3067,29 +3067,29 @@ static int extent_write_cache_pages(struct address_space *mapping,
|
|||
* or even swizzled back from swapper_space to
|
||||
* tmpfs file mapping
|
||||
*/
|
||||
if (!trylock_page(page)) {
|
||||
if (!folio_trylock(folio)) {
|
||||
submit_write_bio(bio_ctrl, 0);
|
||||
lock_page(page);
|
||||
folio_lock(folio);
|
||||
}
|
||||
|
||||
if (unlikely(page->mapping != mapping)) {
|
||||
unlock_page(page);
|
||||
if (unlikely(folio->mapping != mapping)) {
|
||||
folio_unlock(folio);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (wbc->sync_mode != WB_SYNC_NONE) {
|
||||
if (PageWriteback(page))
|
||||
if (folio_test_writeback(folio))
|
||||
submit_write_bio(bio_ctrl, 0);
|
||||
wait_on_page_writeback(page);
|
||||
folio_wait_writeback(folio);
|
||||
}
|
||||
|
||||
if (PageWriteback(page) ||
|
||||
!clear_page_dirty_for_io(page)) {
|
||||
unlock_page(page);
|
||||
if (folio_test_writeback(folio) ||
|
||||
!folio_clear_dirty_for_io(folio)) {
|
||||
folio_unlock(folio);
|
||||
continue;
|
||||
}
|
||||
|
||||
ret = __extent_writepage(page, wbc, bio_ctrl);
|
||||
ret = __extent_writepage(&folio->page, wbc, bio_ctrl);
|
||||
if (ret < 0) {
|
||||
done = 1;
|
||||
break;
|
||||
|
@ -3102,7 +3102,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
|
|||
*/
|
||||
nr_to_write_done = wbc->nr_to_write <= 0;
|
||||
}
|
||||
pagevec_release(&pvec);
|
||||
folio_batch_release(&fbatch);
|
||||
cond_resched();
|
||||
}
|
||||
if (!scanned && !done) {
|
||||
|
|
Loading…
Reference in a new issue