btrfs: fix upper limit for max_inline for page size 64K
The mount option max_inline ranges from 0 to the sectorsize (which is now equal to the page size). But we parse the mount options too early, before the actual sectorsize is read from the superblock. So the upper limit of max_inline is unaware of the actual sectorsize and is limited by the temporary sectorsize 4096, even on a system where the default sectorsize is 64K.

Fix this by reading the superblock sectorsize before the mount option parse.

Reported-by: Alexander Tsvetkov <alexander.tsvetkov@oracle.com>
CC: stable@vger.kernel.org # 5.4+
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
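To make the ordering issue concrete, the following is a small stand-alone C sketch, not kernel code: the struct and function names (fs_state, parse_max_inline) are made up for illustration. It models the clamp applied to the max_inline option against the sector size known at parse time, and shows why parsing before the real 64K sectorsize is read caps the value at the temporary 4096.

/*
 * Stand-alone sketch, not kernel code: fs_state and parse_max_inline are
 * illustrative names.  It models how max_inline is clamped to whatever
 * sector size is known at the moment the option is parsed.
 */
#include <stdio.h>
#include <stdint.h>

struct fs_state {
	uint64_t sectorsize;	/* stays at the temporary 4096 until the superblock is read */
	uint64_t max_inline;
};

/* The requested value is clamped to the sector size known at parse time. */
static void parse_max_inline(struct fs_state *fs, uint64_t requested)
{
	fs->max_inline = requested < fs->sectorsize ? requested : fs->sectorsize;
}

int main(void)
{
	struct fs_state fs = { .sectorsize = 4096, .max_inline = 0 };

	/* Old order: option parsed before the real 64K sectorsize is known. */
	parse_max_inline(&fs, 65536);
	printf("parsed early: max_inline = %llu\n",
	       (unsigned long long)fs.max_inline);	/* prints 4096 */

	/* Fixed order: superblock sectorsize is set first, then the option is parsed. */
	fs.sectorsize = 65536;
	parse_max_inline(&fs, 65536);
	printf("parsed after: max_inline = %llu\n",
	       (unsigned long long)fs.max_inline);	/* prints 65536 */

	return 0;
}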
parent 0d977e0eba
commit 6f93e834fa
1 changed file with 24 additions and 24 deletions
fs/btrfs/disk-io.c
@@ -3314,6 +3314,30 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 	 */
 	fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
 
+	/*
+	 * Flag our filesystem as having big metadata blocks if they are bigger
+	 * than the page size.
+	 */
+	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
+		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
+			btrfs_info(fs_info,
+				"flagging fs with big metadata feature");
+		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+	}
+
+	/* Set up fs_info before parsing mount options */
+	nodesize = btrfs_super_nodesize(disk_super);
+	sectorsize = btrfs_super_sectorsize(disk_super);
+	stripesize = sectorsize;
+	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
+	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
+
+	fs_info->nodesize = nodesize;
+	fs_info->sectorsize = sectorsize;
+	fs_info->sectorsize_bits = ilog2(sectorsize);
+	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
+	fs_info->stripesize = stripesize;
+
 	ret = btrfs_parse_options(fs_info, options, sb->s_flags);
 	if (ret) {
 		err = ret;
@@ -3340,30 +3364,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
 	if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
 		btrfs_info(fs_info, "has skinny extents");
 
-	/*
-	 * flag our filesystem as having big metadata blocks if
-	 * they are bigger than the page size
-	 */
-	if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
-		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
-			btrfs_info(fs_info,
-				"flagging fs with big metadata feature");
-		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
-	}
-
-	nodesize = btrfs_super_nodesize(disk_super);
-	sectorsize = btrfs_super_sectorsize(disk_super);
-	stripesize = sectorsize;
-	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
-	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
-
-	/* Cache block sizes */
-	fs_info->nodesize = nodesize;
-	fs_info->sectorsize = sectorsize;
-	fs_info->sectorsize_bits = ilog2(sectorsize);
-	fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
-	fs_info->stripesize = stripesize;
-
 	/*
 	 * mixed block groups end up with duplicate but slightly offset
 	 * extent buffers for the same range. It leads to corruptions