fs: convert writepage_t callback to pass a folio

Patch series "Convert writepage_t to use a folio".

More folioisation.  I split out the mpage work from everything else
because it completely dominated the patch, but some implementations I just
converted outright.


This patch (of 2):

We always write back an entire folio, but that's currently passed as the
head page.  Convert all filesystems that use write_cache_pages() to expect
a folio instead of a page.

Link: https://lkml.kernel.org/r/20230126201255.1681189-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230126201255.1681189-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2023-01-26 20:12:54 +00:00 committed by Andrew Morton
parent 00cdf76012
commit d585bdbeb7
11 changed files with 44 additions and 44 deletions

View file

@@ -2675,14 +2675,14 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc);
static int cifs_write_one_page(struct page *page, struct writeback_control *wbc,
void *data)
static int cifs_write_one_page(struct folio *folio,
struct writeback_control *wbc, void *data)
{
struct address_space *mapping = data;
int ret;
ret = cifs_writepage_locked(page, wbc);
unlock_page(page);
ret = cifs_writepage_locked(&folio->page, wbc);
folio_unlock(folio);
mapping_set_error(mapping, ret);
return ret;
}

View file

@@ -2711,10 +2711,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
return err;
}
static int ext4_writepage_cb(struct page *page, struct writeback_control *wbc,
static int ext4_writepage_cb(struct folio *folio, struct writeback_control *wbc,
void *data)
{
return ext4_writepage(page, wbc);
return ext4_writepage(&folio->page, wbc);
}
static int ext4_do_writepages(struct mpage_da_data *mpd)

View file

@@ -482,7 +482,7 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
*
* However, we may have to redirty a page (see below.)
*/
static int ext4_journalled_writepage_callback(struct page *page,
static int ext4_journalled_writepage_callback(struct folio *folio,
struct writeback_control *wbc,
void *data)
{
@@ -490,7 +490,7 @@ static int ext4_journalled_writepage_callback(struct page *page,
struct buffer_head *bh, *head;
struct journal_head *jh;
bh = head = page_buffers(page);
bh = head = folio_buffers(folio);
do {
/*
* We have to redirty a page in these cases:
@@ -509,7 +509,7 @@ static int ext4_journalled_writepage_callback(struct page *page,
if (buffer_dirty(bh) ||
(jh && (jh->b_transaction != transaction ||
jh->b_next_transaction))) {
redirty_page_for_writepage(wbc, page);
folio_redirty_for_writepage(wbc, folio);
goto out;
}
} while ((bh = bh->b_this_page) != head);

View file

@@ -2184,7 +2184,7 @@ static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
return false;
}
static int fuse_writepages_fill(struct page *page,
static int fuse_writepages_fill(struct folio *folio,
struct writeback_control *wbc, void *_data)
{
struct fuse_fill_wb_data *data = _data;
@@ -2203,7 +2203,7 @@ static int fuse_writepages_fill(struct page *page,
goto out_unlock;
}
if (wpa && fuse_writepage_need_send(fc, page, ap, data)) {
if (wpa && fuse_writepage_need_send(fc, &folio->page, ap, data)) {
fuse_writepages_send(data);
data->wpa = NULL;
}
@@ -2238,7 +2238,7 @@ static int fuse_writepages_fill(struct page *page,
data->max_pages = 1;
ap = &wpa->ia.ap;
fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
fuse_write_args_fill(&wpa->ia, data->ff, folio_pos(folio), 0);
wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
wpa->next = NULL;
ap->args.in_pages = true;
@@ -2246,13 +2246,13 @@ static int fuse_writepages_fill(struct page *page,
ap->num_pages = 0;
wpa->inode = inode;
}
set_page_writeback(page);
folio_start_writeback(folio);
copy_highpage(tmp_page, page);
copy_highpage(tmp_page, &folio->page);
ap->pages[ap->num_pages] = tmp_page;
ap->descs[ap->num_pages].offset = 0;
ap->descs[ap->num_pages].length = PAGE_SIZE;
data->orig_pages[ap->num_pages] = page;
data->orig_pages[ap->num_pages] = &folio->page;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
@@ -2266,13 +2266,13 @@ static int fuse_writepages_fill(struct page *page,
spin_lock(&fi->lock);
ap->num_pages++;
spin_unlock(&fi->lock);
} else if (fuse_writepage_add(wpa, page)) {
} else if (fuse_writepage_add(wpa, &folio->page)) {
data->wpa = wpa;
} else {
end_page_writeback(page);
folio_end_writeback(folio);
}
out_unlock:
unlock_page(page);
folio_unlock(folio);
return err;
}

View file

@@ -1685,10 +1685,9 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* For unwritten space on the page, we need to start the conversion to
* regular allocated space.
*/
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
static int iomap_do_writepage(struct folio *folio,
struct writeback_control *wbc, void *data)
{
struct folio *folio = page_folio(page);
struct iomap_writepage_ctx *wpc = data;
struct inode *inode = folio->mapping->host;
u64 end_pos, isize;

View file

@@ -440,9 +440,10 @@ void clean_page_buffers(struct page *page)
clean_buffers(page, ~0U);
}
static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
void *data)
{
struct page *page = &folio->page;
struct mpage_data *mpd = data;
struct bio *bio = mpd->bio;
struct address_space *mapping = page->mapping;

View file

@@ -689,13 +689,14 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
return ret;
}
static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
static int nfs_writepages_callback(struct folio *folio,
struct writeback_control *wbc, void *data)
{
int ret;
ret = nfs_do_writepage(page, wbc, data);
ret = nfs_do_writepage(&folio->page, wbc, data);
if (ret != AOP_WRITEPAGE_ACTIVATE)
unlock_page(page);
folio_unlock(folio);
return ret;
}

View file

@@ -832,7 +832,7 @@ int ntfs_set_size(struct inode *inode, u64 new_size)
return err;
}
static int ntfs_resident_writepage(struct page *page,
static int ntfs_resident_writepage(struct folio *folio,
struct writeback_control *wbc, void *data)
{
struct address_space *mapping = data;
@@ -840,11 +840,11 @@ static int ntfs_resident_writepage(struct page *page,
int ret;
ni_lock(ni);
ret = attr_data_write_resident(ni, page);
ret = attr_data_write_resident(ni, &folio->page);
ni_unlock(ni);
if (ret != E_NTFS_NONRESIDENT)
unlock_page(page);
folio_unlock(folio);
mapping_set_error(mapping, ret);
return ret;
}

View file

@@ -154,21 +154,20 @@ static int orangefs_writepages_work(struct orangefs_writepages *ow,
return ret;
}
static int orangefs_writepages_callback(struct page *page,
struct writeback_control *wbc, void *data)
static int orangefs_writepages_callback(struct folio *folio,
struct writeback_control *wbc, void *data)
{
struct orangefs_writepages *ow = data;
struct orangefs_write_range *wr;
struct orangefs_write_range *wr = folio->private;
int ret;
if (!PagePrivate(page)) {
unlock_page(page);
if (!wr) {
folio_unlock(folio);
/* It's not private so there's nothing to write, right? */
printk("writepages_callback not private!\n");
BUG();
return 0;
}
wr = (struct orangefs_write_range *)page_private(page);
ret = -1;
if (ow->npages == 0) {
@@ -176,7 +175,7 @@ static int orangefs_writepages_callback(struct page *page,
ow->len = wr->len;
ow->uid = wr->uid;
ow->gid = wr->gid;
ow->pages[ow->npages++] = page;
ow->pages[ow->npages++] = &folio->page;
ret = 0;
goto done;
}
@@ -188,7 +187,7 @@ static int orangefs_writepages_callback(struct page *page,
}
if (ow->off + ow->len == wr->pos) {
ow->len += wr->len;
ow->pages[ow->npages++] = page;
ow->pages[ow->npages++] = &folio->page;
ret = 0;
goto done;
}
@@ -198,10 +197,10 @@ static int orangefs_writepages_callback(struct page *page,
orangefs_writepages_work(ow, wbc);
ow->npages = 0;
}
ret = orangefs_writepage_locked(page, wbc);
mapping_set_error(page->mapping, ret);
unlock_page(page);
end_page_writeback(page);
ret = orangefs_writepage_locked(&folio->page, wbc);
mapping_set_error(folio->mapping, ret);
folio_unlock(folio);
folio_end_writeback(folio);
} else {
if (ow->npages == ow->maxpages) {
orangefs_writepages_work(ow, wbc);

View file

@@ -366,7 +366,7 @@ int balance_dirty_pages_ratelimited_flags(struct address_space *mapping,
bool wb_over_bg_thresh(struct bdi_writeback *wb);
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
void *data);
void tag_pages_for_writeback(struct address_space *mapping,

View file

@@ -2470,7 +2470,7 @@ int write_cache_pages(struct address_space *mapping,
goto continue_unlock;
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
error = writepage(&folio->page, wbc, data);
error = writepage(folio, wbc, data);
if (unlikely(error)) {
/*
* Handle errors according to the type of
@@ -2528,11 +2528,11 @@ int write_cache_pages(struct address_space *mapping,
}
EXPORT_SYMBOL(write_cache_pages);
static int writepage_cb(struct page *page, struct writeback_control *wbc,
static int writepage_cb(struct folio *folio, struct writeback_control *wbc,
void *data)
{
struct address_space *mapping = data;
int ret = mapping->a_ops->writepage(page, wbc);
int ret = mapping->a_ops->writepage(&folio->page, wbc);
mapping_set_error(mapping, ret);
return ret;
}