Mirror of https://github.com/torvalds/linux, synced 2024-11-05 18:23:50 +00:00
dm: table detect io beyond device
This patch fixes a panic on shrinking a DM device if there is outstanding I/O to the part of the device that is being removed. (Normally this doesn't happen - a filesystem would be resized first, for example.)

The bug is that __clone_and_map() assumes dm_table_find_target() always returns a valid pointer. It may fail if a bio arrives from the block layer but its target sector is no longer included in the DM btree.

This patch appends an empty entry to table->targets[] which will be returned by a lookup beyond the end of the device.

After calling dm_table_find_target(), __clone_and_map() and target_message() check for this condition using dm_target_is_valid().

Sample test script to trigger oops:
parent fbdcf18df7
commit 512875bd96
4 changed files with 32 additions and 14 deletions
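As a standalone sketch of the mechanism described above (hypothetical toy_* names, not the kernel's own): one extra zero-filled entry is appended to the target array, an out-of-range lookup returns that entry, and the caller refuses it instead of dereferencing a bogus pointer.

/* Hypothetical, simplified model of the fix; not kernel code. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

typedef unsigned long long sector_t;

struct toy_table;

struct toy_target {
        struct toy_table *table;        /* back-pointer; stays NULL in the appended entry */
        sector_t begin;
        sector_t len;
};

struct toy_table {
        unsigned int num_targets;
        struct toy_target *targets;     /* num_targets real entries + 1 zero-filled entry */
};

static int toy_alloc_targets(struct toy_table *t, unsigned int num)
{
        /* The "+ 1" mirrors the alloc_targets() change: one empty entry
         * catches sectors beyond the end of the device. */
        t->targets = calloc(num + 1, sizeof(*t->targets));
        if (!t->targets)
                return -ENOMEM;
        t->num_targets = num;
        return 0;
}

static struct toy_target *toy_find_target(struct toy_table *t, sector_t sector)
{
        unsigned int i;

        for (i = 0; i < t->num_targets; i++)
                if (sector < t->targets[i].begin + t->targets[i].len)
                        return &t->targets[i];

        return &t->targets[t->num_targets];     /* the appended, invalid entry */
}

int main(void)
{
        struct toy_table t;
        struct toy_target *ti;

        if (toy_alloc_targets(&t, 1))
                return 1;
        t.targets[0] = (struct toy_target){ .table = &t, .begin = 0, .len = 100 };

        ti = toy_find_target(&t, 150);  /* past the 100-sector device */
        if (!ti->table)                 /* same idea as dm_target_is_valid() */
                printf("I/O beyond end of device: fail the bio with -EIO (%d)\n", -EIO);

        free(t.targets);
        return 0;
}

In the actual patch the lookup is the existing btree search in dm_table_find_target(); the new pieces are the extra zero-filled slot allocated in alloc_targets() and the dm_target_is_valid() checks at the call sites, as the diffs below show.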
drivers/md/dm-ioctl.c
@@ -1250,21 +1250,17 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
         if (!table)
                 goto out_argv;
 
-        if (tmsg->sector >= dm_table_get_size(table)) {
+        ti = dm_table_find_target(table, tmsg->sector);
+        if (!dm_target_is_valid(ti)) {
                 DMWARN("Target message sector outside device.");
                 r = -EINVAL;
-                goto out_table;
-        }
-
-        ti = dm_table_find_target(table, tmsg->sector);
-        if (ti->type->message)
+        } else if (ti->type->message)
                 r = ti->type->message(ti, argc, argv);
         else {
                 DMWARN("Target type does not support messages");
                 r = -EINVAL;
         }
 
- out_table:
         dm_table_put(table);
  out_argv:
         kfree(argv);
drivers/md/dm-table.c
@@ -189,8 +189,10 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 
         /*
          * Allocate both the target array and offset array at once.
+         * Append an empty entry to catch sectors beyond the end of
+         * the device.
          */
-        n_highs = (sector_t *) dm_vcalloc(num, sizeof(struct dm_target) +
+        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                           sizeof(sector_t));
         if (!n_highs)
                 return -ENOMEM;
@@ -867,6 +869,9 @@ struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
 
 /*
  * Search the btree for the correct target.
+ *
+ * Caller should check returned pointer with dm_target_is_valid()
+ * to trap I/O beyond end of device.
  */
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 {
drivers/md/dm.c
@@ -672,13 +672,19 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
         return clone;
 }
 
-static void __clone_and_map(struct clone_info *ci)
+static int __clone_and_map(struct clone_info *ci)
 {
         struct bio *clone, *bio = ci->bio;
-        struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
-        sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
+        struct dm_target *ti;
+        sector_t len = 0, max;
         struct dm_target_io *tio;
 
+        ti = dm_table_find_target(ci->map, ci->sector);
+        if (!dm_target_is_valid(ti))
+                return -EIO;
+
+        max = max_io_len(ci->md, ci->sector, ti);
+
         /*
          * Allocate a target io object.
          */
@@ -736,6 +742,9 @@ static void __clone_and_map(struct clone_info *ci)
         do {
                 if (offset) {
                         ti = dm_table_find_target(ci->map, ci->sector);
+                        if (!dm_target_is_valid(ti))
+                                return -EIO;
+
                         max = max_io_len(ci->md, ci->sector, ti);
 
                         tio = alloc_tio(ci->md);
@@ -759,6 +768,8 @@ static void __clone_and_map(struct clone_info *ci)
 
                 ci->idx++;
         }
+
+        return 0;
 }
 
 /*
@@ -767,6 +778,7 @@ static void __clone_and_map(struct clone_info *ci)
 static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
         struct clone_info ci;
+        int error = 0;
 
         ci.map = dm_get_table(md);
         if (unlikely(!ci.map))
@@ -784,11 +796,11 @@ static int __split_bio(struct mapped_device *md, struct bio *bio)
         ci.idx = bio->bi_idx;
 
         start_io_acct(ci.io);
-        while (ci.sector_count)
-                __clone_and_map(&ci);
+        while (ci.sector_count && !error)
+                error = __clone_and_map(&ci);
 
         /* drop the extra reference count */
-        dec_pending(ci.io, 0);
+        dec_pending(ci.io, error);
         dm_table_put(ci.map);
 
         return 0;
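A short sketch of the error-propagation half of the change, again with hypothetical stand-in names: the mapping helper now reports failure instead of returning void, the split loop stops at the first failure, and the original request is completed with that error rather than unconditionally with 0.

/* Standalone sketch; names are hypothetical, not kernel code. */
#include <stdio.h>
#include <errno.h>

struct toy_clone_info {
        unsigned long long sector;
        unsigned long long sector_count;
};

/* Stand-in for __clone_and_map(): pretend sectors >= 100 are past the device. */
static int toy_clone_and_map(struct toy_clone_info *ci)
{
        if (ci->sector >= 100)
                return -EIO;            /* lookup returned the invalid sentinel */

        /* map one 10-sector chunk */
        ci->sector += 10;
        ci->sector_count -= 10;
        return 0;
}

/* Stand-in for dec_pending(): complete the original request with 'error'. */
static void toy_complete(int error)
{
        printf("request completed with %d\n", error);
}

int main(void)
{
        struct toy_clone_info ci = { .sector = 80, .sector_count = 40 };
        int error = 0;

        /* Mirror of the new loop: stop as soon as a chunk fails to map. */
        while (ci.sector_count && !error)
                error = toy_clone_and_map(&ci);

        toy_complete(error);
        return 0;
}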
drivers/md/dm.h
@@ -112,6 +112,11 @@ int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 void dm_table_unplug_all(struct dm_table *t);
 
+/*
+ * To check the return value from dm_table_find_target().
+ */
+#define dm_target_is_valid(t) ((t)->table)
+
 /*-----------------------------------------------------------------
  * A registry of target types.
  *---------------------------------------------------------------*/
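The macro relies on the appended entry being zero-filled (dm_vcalloc() returns zeroed memory), so its table back-pointer is NULL, whereas a real target gets the back-pointer set when it is added to the table. A tiny illustration with hypothetical stand-in types:

/* Hypothetical stand-ins, for illustration only; not the kernel's definitions. */
#include <stdio.h>

struct toy_table { int dummy; };

struct toy_target {
        struct toy_table *table;        /* NULL in the zero-filled sentinel entry */
        /* ... */
};

#define toy_target_is_valid(t) ((t)->table)

int main(void)
{
        struct toy_table tbl;
        struct toy_target real = { .table = &tbl };
        struct toy_target sentinel = { 0 };     /* what zero-filled allocation produces */

        printf("real target:    %s\n", toy_target_is_valid(&real) ? "valid" : "invalid");
        printf("appended entry: %s\n", toy_target_is_valid(&sentinel) ? "valid" : "invalid");
        return 0;
}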