use ->bd_mapping instead of ->bd_inode->i_mapping

Just the low-hanging fruit...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Link: https://lore.kernel.org/r/20240411145346.2516848-2-viro@zeniv.linux.org.uk
Signed-off-by: Christian Brauner <brauner@kernel.org>
Al Viro 2024-04-11 15:53:37 +01:00
parent e33aef2c58
commit 224941e837
19 changed files with 35 additions and 35 deletions
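
For context: an earlier commit in this series is understood to add a bd_mapping pointer to struct block_device that caches the block device's page-cache mapping, so users no longer have to reach through ->bd_inode. Below is a minimal sketch of that assumed relationship, with simplified types; bdev_cache_mapping() is a hypothetical stand-in for the real one-time initialization (presumably done when the bdev inode is allocated), not kernel code.

/* Minimal compilable sketch, NOT the kernel's actual definitions. */
struct address_space { unsigned long nrpages; /* page-cache state elided */ };

struct inode {
	struct address_space i_data;		/* this inode's page cache */
	struct address_space *i_mapping;	/* normally points at &i_data */
};

struct block_device {
	struct inode *bd_inode;			/* backing inode, being phased out */
	struct address_space *bd_mapping;	/* cached page-cache mapping */
};

/*
 * Hypothetical helper, for illustration only: after this one-time setup,
 * bdev->bd_mapping and bdev->bd_inode->i_mapping name the same object,
 * which is what makes every hunk below a mechanical substitution.
 */
static inline void bdev_cache_mapping(struct block_device *bdev,
				      struct inode *inode)
{
	bdev->bd_inode = inode;
	bdev->bd_mapping = &inode->i_data;
}

With that invariant, the mapping being flushed, truncated, locked or read is the same address_space either way; only the spelling of the access changes.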

View File

@@ -76,7 +76,7 @@ static void bdev_write_inode(struct block_device *bdev)
/* Kill _all_ buffers and pagecache , dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
-struct address_space *mapping = bdev->bd_inode->i_mapping;
+struct address_space *mapping = bdev->bd_mapping;
if (mapping_empty(mapping))
return;
@@ -88,7 +88,7 @@ static void kill_bdev(struct block_device *bdev)
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
-struct address_space *mapping = bdev->bd_inode->i_mapping;
+struct address_space *mapping = bdev->bd_mapping;
if (mapping->nrpages) {
invalidate_bh_lrus();
@@ -116,7 +116,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
goto invalidate;
}
-truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
+truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
if (!(mode & BLK_OPEN_EXCL))
bd_abort_claiming(bdev, truncate_bdev_range);
return 0;
@@ -126,7 +126,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
* Someone else has handle exclusively open. Try invalidating instead.
* The 'end' argument is inclusive so the rounding is safe.
*/
-return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
+return invalidate_inode_pages2_range(bdev->bd_mapping,
lstart >> PAGE_SHIFT,
lend >> PAGE_SHIFT);
}
@@ -198,7 +198,7 @@ int sync_blockdev_nowait(struct block_device *bdev)
{
if (!bdev)
return 0;
-return filemap_flush(bdev->bd_inode->i_mapping);
+return filemap_flush(bdev->bd_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
@@ -210,13 +210,13 @@ int sync_blockdev(struct block_device *bdev)
{
if (!bdev)
return 0;
-return filemap_write_and_wait(bdev->bd_inode->i_mapping);
+return filemap_write_and_wait(bdev->bd_mapping);
}
EXPORT_SYMBOL(sync_blockdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
-return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
+return filemap_write_and_wait_range(bdev->bd_mapping,
lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
@@ -445,7 +445,7 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
void bdev_add(struct block_device *bdev, dev_t dev)
{
if (bdev_stable_writes(bdev))
-mapping_set_stable_writes(bdev->bd_inode->i_mapping);
+mapping_set_stable_writes(bdev->bd_mapping);
bdev->bd_dev = dev;
bdev->bd_inode->i_rdev = dev;
bdev->bd_inode->i_ino = dev;
@@ -925,7 +925,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
bdev_file->f_mode |= FMODE_NOWAIT;
if (mode & BLK_OPEN_RESTRICT_WRITES)
bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
-bdev_file->f_mapping = bdev->bd_inode->i_mapping;
+bdev_file->f_mapping = bdev->bd_mapping;
bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
bdev_file->private_data = holder;

View File

@@ -398,7 +398,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
op = REQ_OP_ZONE_RESET;
/* Invalidate the page cache, including dirty pages. */
-filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+filemap_invalidate_lock(bdev->bd_mapping);
ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
if (ret)
goto fail;
@@ -420,7 +420,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
fail:
if (cmd == BLKRESETZONE)
-filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+filemap_invalidate_unlock(bdev->bd_mapping);
return ret;
}

View File

@@ -745,7 +745,7 @@ void invalidate_disk(struct gendisk *disk)
struct block_device *bdev = disk->part0;
invalidate_bdev(bdev);
-bdev->bd_inode->i_mapping->wb_err = 0;
+bdev->bd_mapping->wb_err = 0;
set_capacity(disk, 0);
}
EXPORT_SYMBOL(invalidate_disk);

View File

@@ -152,12 +152,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
if (start + len > bdev_nr_bytes(bdev))
return -EINVAL;
-filemap_invalidate_lock(bdev->bd_inode->i_mapping);
+filemap_invalidate_lock(bdev->bd_mapping);
err = truncate_bdev_range(bdev, mode, start, start + len - 1);
if (!err)
err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
GFP_KERNEL);
-filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
+filemap_invalidate_unlock(bdev->bd_mapping);
return err;
}

View File

@@ -704,7 +704,7 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed);
void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p)
{
-struct address_space *mapping = state->disk->part0->bd_inode->i_mapping;
+struct address_space *mapping = state->disk->part0->bd_mapping;
struct folio *folio;
if (n >= get_capacity(state->disk)) {

View File

@@ -171,7 +171,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
struct page *page;
unsigned int i;
-page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+page = read_cache_page_gfp(bdev->bd_mapping,
SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
if (IS_ERR(page))
return "IO error";

View File

@@ -32,7 +32,7 @@
*/
unsigned char *scsi_bios_ptable(struct block_device *dev)
{
-struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping;
+struct address_space *mapping = bdev_whole(dev)->bd_mapping;
unsigned char *res = NULL;
struct folio *folio;

View File

@@ -3651,7 +3651,7 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
struct btrfs_super_block *super;
struct page *page;
u64 bytenr, bytenr_orig;
-struct address_space *mapping = bdev->bd_inode->i_mapping;
+struct address_space *mapping = bdev->bd_mapping;
int ret;
bytenr_orig = btrfs_sb_offset(copy_num);
@@ -3738,7 +3738,7 @@ static int write_dev_supers(struct btrfs_device *device,
struct btrfs_super_block *sb, int max_mirrors)
{
struct btrfs_fs_info *fs_info = device->fs_info;
-struct address_space *mapping = device->bdev->bd_inode->i_mapping;
+struct address_space *mapping = device->bdev->bd_mapping;
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
int i;
int errors = 0;
@@ -3855,7 +3855,7 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
device->commit_total_bytes)
break;
-page = find_get_page(device->bdev->bd_inode->i_mapping,
+page = find_get_page(device->bdev->bd_mapping,
bytenr >> PAGE_SHIFT);
if (!page) {
errors++;

View File

@@ -1290,7 +1290,7 @@ static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev
return ERR_PTR(-EINVAL);
/* pull in the page with our super */
-page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
+page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL);
if (IS_ERR(page))
return ERR_CAST(page);

View File

@@ -118,7 +118,7 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
return -ENOENT;
} else if (full[0] && full[1]) {
/* Compare two super blocks */
-struct address_space *mapping = bdev->bd_inode->i_mapping;
+struct address_space *mapping = bdev->bd_mapping;
struct page *page[BTRFS_NR_SB_LOG_ZONES];
struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES];
int i;

View File

@@ -1463,7 +1463,7 @@ __bread_gfp(struct block_device *bdev, sector_t block,
{
struct buffer_head *bh;
-gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
/*
* Prefer looping in the allocator rather than here, at least that

View File

@@ -183,7 +183,7 @@ static int next_buffer;
static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset,
unsigned int len)
{
-struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+struct address_space *mapping = sb->s_bdev->bd_mapping;
struct file_ra_state ra = {};
struct page *pages[BLKS_PER_BUF];
unsigned i, blocknr, buffer;

View File

@@ -68,7 +68,7 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
if (erofs_is_fscache_mode(sb))
buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping;
else
-buf->mapping = sb->s_bdev->bd_inode->i_mapping;
+buf->mapping = sb->s_bdev->bd_mapping;
}
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,

View File

@@ -192,7 +192,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
(PAGE_SHIFT - inode->i_blkbits);
if (!ra_has_index(&file->f_ra, index))
page_cache_sync_readahead(
-sb->s_bdev->bd_inode->i_mapping,
+sb->s_bdev->bd_mapping,
&file->f_ra, file,
index, 1);
file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT;

View File

@@ -206,7 +206,7 @@ static void ext4_journal_abort_handle(const char *caller, unsigned int line,
static void ext4_check_bdev_write_error(struct super_block *sb)
{
-struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
+struct address_space *mapping = sb->s_bdev->bd_mapping;
struct ext4_sb_info *sbi = EXT4_SB(sb);
int err;

View File

@@ -244,7 +244,7 @@ static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb,
struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
blk_opf_t op_flags)
{
-gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping,
+gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
~__GFP_FS) | __GFP_MOVABLE;
return __ext4_sb_bread_gfp(sb, block, op_flags, gfp);
@@ -253,7 +253,7 @@ struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block,
struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb,
sector_t block)
{
-gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping,
+gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping,
~__GFP_FS);
return __ext4_sb_bread_gfp(sb, block, 0, gfp);
@@ -5556,7 +5556,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
* used to detect the metadata async write error.
*/
spin_lock_init(&sbi->s_bdev_wb_lock);
-errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
+errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err,
&sbi->s_bdev_wb_err);
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);

View File

@@ -2009,7 +2009,7 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags)
byte_count = (block_stop - block_start + 1) *
journal->j_blocksize;
-truncate_inode_pages_range(journal->j_dev->bd_inode->i_mapping,
+truncate_inode_pages_range(journal->j_dev->bd_mapping,
byte_start, byte_stop);
if (flags & JBD2_JOURNAL_FLUSH_DISCARD) {

View File

@@ -338,7 +338,7 @@ static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
{
gfp_t gfp;
-gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
gfp |= __GFP_NOFAIL;
return bdev_getblk(bdev, block, size, gfp);
@@ -349,7 +349,7 @@ static inline struct buffer_head *__getblk(struct block_device *bdev,
{
gfp_t gfp;
-gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS);
gfp |= __GFP_MOVABLE | __GFP_NOFAIL;
return bdev_getblk(bdev, block, size, gfp);

View File

@@ -1696,7 +1696,7 @@ static inline void jbd2_journal_abort_handle(handle_t *handle)
static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
{
-struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+struct address_space *mapping = journal->j_fs_dev->bd_mapping;
/*
* Save the original wb_err value of client fs's bdev mapping which
@@ -1707,7 +1707,7 @@ static inline void jbd2_init_fs_dev_write_error(journal_t *journal)
static inline int jbd2_check_fs_dev_write_error(journal_t *journal)
{
-struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping;
+struct address_space *mapping = journal->j_fs_dev->bd_mapping;
return errseq_check(&mapping->wb_err,
READ_ONCE(journal->j_fs_dev_wb_err));