bcachefs: Fix moving compressed data
bio_uncompress_inplace() used to potentially need to extend the bio to be big enough for the uncompressed data, which has become problematic with multipage bvecs - but the move extent path actually already allocated the bios to be big enough for the uncompressed data. The promote path needed to be fixed, though.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 0fd7263e2e
commit a6f4d5bb24

2 changed files with 34 additions and 39 deletions
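Before the diff, a minimal userspace sketch of the sizing arithmetic this patch relies on, assuming 512-byte sectors and 4 KiB pages (PAGE_SECTORS = 8); the struct and helper names below are illustrative stand-ins, not bcachefs or kernel definitions. The write-side (promote) bio is sized for max(compressed_size, live_size) sectors so the data can be decompressed in the write path, while the read side only fills compressed_size worth of it and shrinks bi_size to match:

/*
 * Sketch only: userspace model of the page/byte sizing in this patch.
 * PAGE_SECTORS, crc_sizes, promote_pages() and read_bytes() are illustrative
 * assumptions, not the kernel's or bcachefs's definitions.
 */
#include <stdio.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS		8	/* 4096-byte pages / 512-byte sectors */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

struct crc_sizes {			/* stand-in for bch_extent_crc_unpacked */
	unsigned compressed_size;	/* sectors occupied on disk */
	unsigned live_size;		/* sectors the extent actually references */
};

/*
 * Write-side (promote) bio: sized to hold whichever form is larger,
 * since the data may have to be decompressed in the write path.
 */
static unsigned promote_pages(const struct crc_sizes *crc)
{
	unsigned sectors = crc->compressed_size > crc->live_size
		? crc->compressed_size
		: crc->live_size;

	return DIV_ROUND_UP(sectors, PAGE_SECTORS);
}

/*
 * Read side: only the compressed payload is read, so bi_size is shrunk
 * to this many bytes even though the bio was allocated larger.
 */
static unsigned read_bytes(const struct crc_sizes *crc)
{
	return crc->compressed_size << SECTOR_SHIFT;
}

int main(void)
{
	/* e.g. a 128-sector extent compressed down to 32 sectors */
	struct crc_sizes crc = { .compressed_size = 32, .live_size = 128 };

	printf("write-side bio: %u pages\n", promote_pages(&crc));	/* 16 */
	printf("read-side bi_size: %u bytes\n", read_bytes(&crc));	/* 16384 */
	return 0;
}

With this example extent, the write-side bio gets 16 pages (64 KiB) while the read only consumes 16 KiB of it - the invariant that the BUG_ON added to __bch2_read_extent below checks.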
@@ -241,19 +241,10 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
 	}
 
 	/*
-	 * might have to free existing pages and retry allocation from mempool -
-	 * do this _after_ decompressing:
+	 * XXX: don't have a good way to assert that the bio was allocated with
+	 * enough space, we depend on bch2_move_extent doing the right thing
 	 */
-	if (bio->bi_iter.bi_size < crc->live_size << 9) {
-		if (bch2_bio_alloc_pages(bio, (crc->live_size << 9) -
-					 bio->bi_iter.bi_size,
-					 GFP_NOFS)) {
-			bch2_bio_free_pages_pool(c, bio);
-			bio->bi_iter.bi_size = 0;
-			bio->bi_vcnt = 0;
-			bch2_bio_alloc_pages_pool(c, bio, crc->live_size << 9);
-		}
-	}
 
 	bio->bi_iter.bi_size = crc->live_size << 9;
+
 	memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));
@@ -1039,22 +1039,18 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
 					  struct bpos pos,
 					  struct extent_ptr_decoded *pick,
 					  struct bch_io_opts opts,
-					  unsigned rbio_sectors,
+					  unsigned sectors,
 					  struct bch_read_bio **rbio)
 {
 	struct promote_op *op = NULL;
 	struct bio *bio;
-	unsigned rbio_pages = DIV_ROUND_UP(rbio_sectors, PAGE_SECTORS);
-	/* data might have to be decompressed in the write path: */
-	unsigned wbio_pages = DIV_ROUND_UP(pick->crc.uncompressed_size,
-					   PAGE_SECTORS);
+	unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
 	int ret;
 
 	if (!percpu_ref_tryget(&c->writes))
 		return NULL;
 
-	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * wbio_pages,
-		     GFP_NOIO);
+	op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOIO);
 	if (!op)
 		goto err;
 
@@ -1062,34 +1058,32 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
 	op->pos = pos;
 
 	/*
-	 * promotes require bouncing, but if the extent isn't
-	 * checksummed/compressed it might be too big for the mempool:
+	 * We don't use the mempool here because extents that aren't
+	 * checksummed or compressed can be too big for the mempool:
 	 */
-	if (rbio_sectors > c->sb.encoded_extent_max) {
-		*rbio = kzalloc(sizeof(struct bch_read_bio) +
-				sizeof(struct bio_vec) * rbio_pages,
-				GFP_NOIO);
-		if (!*rbio)
-			goto err;
+	*rbio = kzalloc(sizeof(struct bch_read_bio) +
+			sizeof(struct bio_vec) * pages,
+			GFP_NOIO);
+	if (!*rbio)
+		goto err;
 
-		rbio_init(&(*rbio)->bio, opts);
-		bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, rbio_pages, 0);
+	rbio_init(&(*rbio)->bio, opts);
+	bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
 
-		if (bch2_bio_alloc_pages(&(*rbio)->bio, rbio_sectors << 9,
-					 GFP_NOIO))
-			goto err;
+	if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
+				 GFP_NOIO))
+		goto err;
 
-		(*rbio)->bounce = true;
-		(*rbio)->split = true;
-		(*rbio)->kmalloc = true;
-	}
+	(*rbio)->bounce = true;
+	(*rbio)->split = true;
+	(*rbio)->kmalloc = true;
 
 	if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
 					  bch_promote_params))
 		goto err;
 
 	bio = &op->write.op.wbio.bio;
-	bio_init(bio, NULL, bio->bi_inline_vecs, wbio_pages, 0);
+	bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
 
 	ret = bch2_migrate_write_init(c, &op->write,
 			writepoint_hashed((unsigned long) current),
@@ -1123,8 +1117,9 @@ static inline struct promote_op *promote_alloc(struct bch_fs *c,
 						bool *read_full)
 {
 	bool promote_full = *read_full || READ_ONCE(c->promote_whole_extents);
+	/* data might have to be decompressed in the write path: */
 	unsigned sectors = promote_full
-		? pick->crc.compressed_size
+		? max(pick->crc.compressed_size, pick->crc.live_size)
 		: bvec_iter_sectors(iter);
 	struct bpos pos = promote_full
 		? bkey_start_pos(k.k)
@@ -1659,7 +1654,16 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 	}
 
 	if (rbio) {
-		/* promote already allocated bounce rbio */
+		/*
+		 * promote already allocated bounce rbio:
+		 * promote needs to allocate a bio big enough for uncompressing
+		 * data in the write path, but we're not going to use it all
+		 * here:
+		 */
+		BUG_ON(rbio->bio.bi_iter.bi_size <
+		       pick.crc.compressed_size << 9);
+		rbio->bio.bi_iter.bi_size =
+			pick.crc.compressed_size << 9;
 	} else if (bounce) {
 		unsigned sectors = pick.crc.compressed_size;
 