fs: Add aops->launder_folio

Since the only difference between ->launder_page and ->launder_folio
is the type of the pointer, these can safely use a union without
affecting bisectability.
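
For illustration only (not part of this patch): the transition stays safe
because a folio and its head page share an address, so a filesystem that
still installs ->launder_page keeps working when the core calls the same
union slot as ->launder_folio. A minimal sketch, with oldfs_* names being
hypothetical:

	/* Unconverted filesystem: still provides the page-based hook. */
	static int oldfs_launder_page(struct page *page)
	{
		/* Write the dirty page back; the caller holds the lock. */
		return oldfs_writepage_locked(page);	/* hypothetical helper */
	}

	static const struct address_space_operations oldfs_aops = {
		.launder_page	= oldfs_launder_page,
	};

	/*
	 * The core now invokes the folio member of the union:
	 *	mapping->a_ops->launder_folio(folio);
	 * which reaches oldfs_launder_page() with the head page's address,
	 * so each step of the conversion remains bisectable.
	 */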

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
Author: Matthew Wilcox (Oracle)
Date:   2022-02-09 20:21:52 +00:00
commit affa80e8c6
parent f50015a596
4 changed files with 17 additions and 14 deletions

Documentation/filesystems/locking.rst

@@ -257,7 +257,7 @@ prototypes::
 	bool (*isolate_page) (struct page *, isolate_mode_t);
 	int (*migratepage)(struct address_space *, struct page *, struct page *);
 	void (*putback_page) (struct page *);
-	int (*launder_page)(struct page *);
+	int (*launder_folio)(struct folio *);
 	bool (*is_partially_uptodate)(struct folio *, size_t from, size_t count);
 	int (*error_remove_page)(struct address_space *, struct page *);
 	int (*swap_activate)(struct file *);
@@ -285,7 +285,7 @@ direct_IO:
 isolate_page:		yes
 migratepage:		yes (both)
 putback_page:		yes
-launder_page:		yes
+launder_folio:		yes
 is_partially_uptodate:	yes
 error_remove_page:	yes
 swap_activate:		no
@@ -385,9 +385,9 @@ the kernel assumes that the fs has no private interest in the buffers.
 ->freepage() is called when the kernel is done dropping the page
 from the page cache.

-->launder_page() may be called prior to releasing a page if
-it is still found to be dirty. It returns zero if the page was successfully
-cleaned, or an error value if not. Note that in order to prevent the page
+->launder_folio() may be called prior to releasing a folio if
+it is still found to be dirty. It returns zero if the folio was successfully
+cleaned, or an error value if not. Note that in order to prevent the folio
 getting mapped back in and redirtied, it needs to be kept locked
 across the entire operation.
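
As a sketch of the contract described above (illustrative only, with
hypothetical myfs_* names): the method writes the dirty folio back and
reports the result, while the caller keeps the folio locked for the whole
call.

	/* Hypothetical ->launder_folio implementation for a converted filesystem. */
	static int myfs_launder_folio(struct folio *folio)
	{
		if (!folio_test_dirty(folio))
			return 0;
		/* Write the folio back synchronously; hypothetical helper. */
		return myfs_write_folio_sync(folio);
	}

	static const struct address_space_operations myfs_aops = {
		.launder_folio	= myfs_launder_folio,
	};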

Documentation/filesystems/vfs.rst

@@ -745,7 +745,7 @@ cache in your filesystem. The following members are defined:
 	int (*migratepage) (struct page *, struct page *);
 	/* put migration-failed page back to right list */
 	void (*putback_page) (struct page *);
-	int (*launder_page) (struct page *);
+	int (*launder_folio) (struct folio *);
 	bool (*is_partially_uptodate) (struct folio *, size_t from,
 					size_t count);
@@ -930,9 +930,9 @@ cache in your filesystem. The following members are defined:
 ``putback_page``
 	Called by the VM when isolated page's migration fails.

-``launder_page``
-	Called before freeing a page - it writes back the dirty page.
-	To prevent redirtying the page, it is kept locked during the
+``launder_folio``
+	Called before freeing a folio - it writes back the dirty folio.
+	To prevent redirtying the folio, it is kept locked during the
 	whole operation.

 ``is_partially_uptodate``

include/linux/fs.h

@@ -399,7 +399,10 @@ struct address_space_operations {
 			struct page *, struct page *, enum migrate_mode);
 	bool (*isolate_page)(struct page *, isolate_mode_t);
 	void (*putback_page)(struct page *);
-	int (*launder_page) (struct page *);
+	union {
+		int (*launder_page) (struct page *);
+		int (*launder_folio) (struct folio *);
+	};
 	bool (*is_partially_uptodate) (struct folio *, size_t from,
 					size_t count);
 	void (*is_dirty_writeback) (struct page *, bool *, bool *);

mm/truncate.c

@@ -614,13 +614,13 @@ static int invalidate_complete_folio2(struct address_space *mapping,
 	return 0;
 }

-static int do_launder_folio(struct address_space *mapping, struct folio *folio)
+static int folio_launder(struct address_space *mapping, struct folio *folio)
 {
 	if (!folio_test_dirty(folio))
 		return 0;
-	if (folio->mapping != mapping || mapping->a_ops->launder_page == NULL)
+	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
 		return 0;
-	return mapping->a_ops->launder_page(&folio->page);
+	return mapping->a_ops->launder_folio(folio);
 }

 /**
@@ -686,7 +686,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				unmap_mapping_folio(folio);
 			BUG_ON(folio_mapped(folio));

-			ret2 = do_launder_folio(mapping, folio);
+			ret2 = folio_launder(mapping, folio);
 			if (ret2 == 0) {
 				if (!invalidate_complete_folio2(mapping, folio))
 					ret2 = -EBUSY;