commit b1f8ccdaae
Merge tag 'for-5.18/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper updates from Mike Snitzer:

 - Significant refactoring and fixing of how DM core does bio-based IO
   accounting, with a focus on fixing wildly inaccurate IO stats for
   dm-crypt (and other DM targets that defer bio submission in their
   own workqueues). The end result is proper IO accounting, made
   possible by targets being updated to use the new
   dm_submit_bio_remap() interface.

 - Add hipri bio polling support (REQ_POLLED) to bio-based DM.

 - Reduce the dm_io and dm_target_io structs so that a single dm_io
   (which contains dm_target_io and the first clone bio) weighs in at
   256 bytes. For reference, the bio struct is 128 bytes.

 - Various other small cleanups, fixes and improvements in DM core and
   targets.

 - Update MAINTAINERS with my kernel.org email address to allow
   distinction between my "upstream" and "Red" Hats.

* tag 'for-5.18/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (46 commits)
  dm: consolidate spinlocks in dm_io struct
  dm: reduce size of dm_io and dm_target_io structs
  dm: switch dm_target_io booleans over to proper flags
  dm: switch dm_io booleans over to proper flags
  dm: update email address in MAINTAINERS
  dm: return void from __send_empty_flush
  dm: factor out dm_io_complete
  dm cache: use dm_submit_bio_remap
  dm: simplify dm_sumbit_bio_remap interface
  dm thin: use dm_submit_bio_remap
  dm: add WARN_ON_ONCE to dm_submit_bio_remap
  dm: support bio polling
  block: add ->poll_bio to block_device_operations
  dm mpath: use DMINFO instead of printk with KERN_INFO
  dm: stop using bdevname
  dm-zoned: remove the ->name field in struct dmz_dev
  dm: remove unnecessary local variables in __bind
  dm: requeue IO if mapping table not yet available
  dm io: remove stale comment block for dm_io()
  dm thin metadata: remove unused dm_thin_remove_block and __remove
  ...
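The central interface change in this series: a target that queues bios to its own workqueue no longer calls submit_bio_noacct() on them directly. Instead it sets ti->accounts_remapped_io in its constructor and resubmits through dm_submit_bio_remap(), so DM core starts IO accounting at actual submission time and attributes it to the original bio. A minimal sketch of the opt-in (the target and its context struct are hypothetical; the DM field and function are the ones added by this series):

	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		/* ...allocate and parse per-target context... */
		ti->accounts_remapped_io = true;	/* target will call dm_submit_bio_remap() */
		return 0;
	}

	static void example_worker(struct work_struct *work)
	{
		struct example_io *io = container_of(work, struct example_io, work);

		/*
		 * First argument is the bio that .map received (so DM core can
		 * find the owning dm_io); second is the target-built clone, or
		 * NULL when resubmitting the bio itself.
		 */
		dm_submit_bio_remap(io->base_bio, io->clone);
	}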
@@ -5605,7 +5605,7 @@ F: include/linux/devm-helpers.h
 
 DEVICE-MAPPER  (LVM)
 M:	Alasdair Kergon <agk@redhat.com>
-M:	Mike Snitzer <snitzer@redhat.com>
+M:	Mike Snitzer <snitzer@kernel.org>
 M:	dm-devel@redhat.com
 L:	dm-devel@redhat.com
 S:	Maintained
@@ -688,7 +688,7 @@ static void __submit_bio(struct bio *bio)
  *
  * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
  * bio_list_on_stack[1] contains bios that were submitted before the current
- * ->submit_bio_bio, but that haven't been processed yet.
+ * ->submit_bio, but that haven't been processed yet.
  */
 static void __submit_bio_noacct(struct bio *bio)
 {
@@ -955,7 +955,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
-	int ret;
+	int ret = 0;
 
 	if (cookie == BLK_QC_T_NONE ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
@@ -965,10 +965,14 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 
 	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 		return 0;
-	if (WARN_ON_ONCE(!queue_is_mq(q)))
-		ret = 0;	/* not yet implemented, should not happen */
-	else
-		ret = blk_mq_poll(q, cookie, iob, flags);
+	if (queue_is_mq(q)) {
+		ret = blk_mq_poll(q, cookie, iob, flags);
+	} else {
+		struct gendisk *disk = q->disk;
+
+		if (disk && disk->fops->poll_bio)
+			ret = disk->fops->poll_bio(bio, iob, flags);
+	}
 	blk_queue_exit(q);
 	return ret;
 }
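For bio-based drivers, bio_poll() now falls back to the new ->poll_bio method in block_device_operations instead of warning. A rough sketch of what such a method can look like for a simple stacking driver (the lookup helper and driver types are hypothetical; bio_poll()'s signature is the one shown above):

	static int example_poll_bio(struct bio *bio, struct io_comp_batch *iob,
				    unsigned int flags)
	{
		struct example_dev *ed = bio->bi_bdev->bd_disk->private_data;
		struct bio *clone = example_find_clone(ed, bio);	/* hypothetical lookup */

		/* Poll the underlying device the bio was remapped to */
		return clone ? bio_poll(clone, iob, flags) : 0;
	}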
@@ -412,6 +412,10 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
 	struct device *ddev = disk_to_dev(disk);
 	int ret;
 
+	/* Only makes sense for bio-based to set ->poll_bio */
+	if (queue_is_mq(disk->queue) && disk->fops->poll_bio)
+		return -EINVAL;
+
 	/*
 	 * The disk queue should now be all set with enough information about
 	 * the device for the elevator code to pick an adequate default
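The check above makes ->poll_bio mutually exclusive with blk-mq: it only makes sense for drivers that also implement ->submit_bio. A bio-based disk would therefore register something like this (sketch; names hypothetical), and device_add_disk() now returns -EINVAL if .poll_bio is set on a request-based queue:

	static const struct block_device_operations example_fops = {
		.owner		= THIS_MODULE,
		.submit_bio	= example_submit_bio,	/* marks the driver bio-based */
		.poll_bio	= example_poll_bio,	/* valid only alongside ->submit_bio */
	};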
@@ -1026,7 +1026,9 @@ static unsigned default_promote_level(struct smq_policy *mq)
 	 * This scheme reminds me of a graph of entropy vs probability of a
	 * binary variable.
	 */
-	static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};
+	static const unsigned int table[] = {
+		1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
+	};
 
 	unsigned hits = mq->cache_stats.hits;
 	unsigned misses = mq->cache_stats.misses;
@@ -803,7 +803,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 static void accounted_request(struct cache *cache, struct bio *bio)
 {
 	accounted_begin(cache, bio);
-	submit_bio_noacct(bio);
+	dm_submit_bio_remap(bio, NULL);
 }
 
 static void issue_op(struct bio *bio, void *context)
@@ -1708,7 +1708,7 @@ static bool process_bio(struct cache *cache, struct bio *bio)
 	bool commit_needed;
 
 	if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
-		submit_bio_noacct(bio);
+		dm_submit_bio_remap(bio, NULL);
 
 	return commit_needed;
 }
@@ -1774,7 +1774,7 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
 
 	if (cache->features.discard_passdown) {
 		remap_to_origin(cache, bio);
-		submit_bio_noacct(bio);
+		dm_submit_bio_remap(bio, NULL);
 	} else
 		bio_endio(bio);
 
@@ -2015,7 +2015,6 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
 {
 	int r;
 	sector_t metadata_dev_size;
-	char b[BDEVNAME_SIZE];
 
 	if (!at_least_one_arg(as, error))
 		return -EINVAL;
@@ -2029,8 +2028,8 @@ static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
 
 	metadata_dev_size = get_dev_size(ca->metadata_dev);
 	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
-		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-		       bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+		DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
+		       ca->metadata_dev->bdev, THIN_METADATA_MAX_SECTORS);
 
 	return 0;
 }
@@ -2357,6 +2356,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	cache->ti = ca->ti;
 	ti->private = cache;
+	ti->accounts_remapped_io = true;
 	ti->num_flush_bios = 2;
 	ti->flush_supported = true;
 
@@ -3345,7 +3345,6 @@ static void disable_passdown_if_not_supported(struct cache *cache)
 	struct block_device *origin_bdev = cache->origin_dev->bdev;
 	struct queue_limits *origin_limits = &bdev_get_queue(origin_bdev)->limits;
 	const char *reason = NULL;
-	char buf[BDEVNAME_SIZE];
 
 	if (!cache->features.discard_passdown)
 		return;
@@ -3357,8 +3356,8 @@ static void disable_passdown_if_not_supported(struct cache *cache)
 		reason = "max discard sectors smaller than a block";
 
 	if (reason) {
-		DMWARN("Origin device (%s) %s: Disabling discard passdown.",
-		       bdevname(origin_bdev, buf), reason);
+		DMWARN("Origin device (%pg) %s: Disabling discard passdown.",
+		       origin_bdev, reason);
 		cache->features.discard_passdown = false;
 	}
 }
@@ -1682,7 +1682,6 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char *
 {
 	int r;
 	sector_t metadata_dev_size;
-	char b[BDEVNAME_SIZE];
 
 	r = dm_get_device(clone->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
 			  &clone->metadata_dev);
@@ -1693,8 +1692,8 @@ static int parse_metadata_dev(struct clone *clone, struct dm_arg_set *as, char *
 
 	metadata_dev_size = get_dev_size(clone->metadata_dev);
 	if (metadata_dev_size > DM_CLONE_METADATA_MAX_SECTORS_WARNING)
-		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-		       bdevname(clone->metadata_dev->bdev, b), DM_CLONE_METADATA_MAX_SECTORS);
+		DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
+		       clone->metadata_dev->bdev, DM_CLONE_METADATA_MAX_SECTORS);
 
 	return 0;
 }
@@ -2033,7 +2032,6 @@ static void disable_passdown_if_not_supported(struct clone *clone)
 	struct block_device *dest_dev = clone->dest_dev->bdev;
 	struct queue_limits *dest_limits = &bdev_get_queue(dest_dev)->limits;
 	const char *reason = NULL;
-	char buf[BDEVNAME_SIZE];
 
 	if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags))
 		return;
@@ -2044,8 +2042,8 @@ static void disable_passdown_if_not_supported(struct clone *clone)
 		reason = "max discard sectors smaller than a region";
 
 	if (reason) {
-		DMWARN("Destination device (%s) %s: Disabling discard passdown.",
-		       bdevname(dest_dev, buf), reason);
+		DMWARN("Destination device (%pd) %s: Disabling discard passdown.",
+		       dest_dev, reason);
 		clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags);
 	}
 }
@@ -64,11 +64,21 @@ struct mapped_device {
 	struct gendisk *disk;
 	struct dax_device *dax_dev;
 
+	wait_queue_head_t wait;
+	unsigned long __percpu *pending_io;
+
+	/* forced geometry settings */
+	struct hd_geometry geometry;
+
+	/*
+	 * Processing queue (flush)
+	 */
+	struct workqueue_struct *wq;
+
 	/*
 	 * A list of ios that arrived while we were suspended.
 	 */
 	struct work_struct work;
-	wait_queue_head_t wait;
 	spinlock_t deferred_lock;
 	struct bio_list deferred;
 
@@ -83,36 +93,28 @@ struct mapped_device {
 	struct list_head uevent_list;
 	spinlock_t uevent_lock; /* Protect access to uevent_list */
 
+	/* for blk-mq request-based DM support */
+	bool init_tio_pdu:1;
+	struct blk_mq_tag_set *tag_set;
+
+	struct dm_stats stats;
+
 	/* the number of internal suspends */
 	unsigned internal_suspend_count;
 
+	int swap_bios;
+	struct semaphore swap_bios_semaphore;
+	struct mutex swap_bios_lock;
+
 	/*
	 * io objects are allocated from here.
	 */
 	struct bio_set io_bs;
 	struct bio_set bs;
 
-	/*
-	 * Processing queue (flush)
-	 */
-	struct workqueue_struct *wq;
-
-	/* forced geometry settings */
-	struct hd_geometry geometry;
-
 	/* kobject and completion */
 	struct dm_kobject_holder kobj_holder;
 
-	int swap_bios;
-	struct semaphore swap_bios_semaphore;
-	struct mutex swap_bios_lock;
-
-	struct dm_stats stats;
-
-	/* for blk-mq request-based DM support */
-	struct blk_mq_tag_set *tag_set;
-	bool init_tio_pdu:1;
-
 	struct srcu_struct io_barrier;
 
 #ifdef CONFIG_BLK_DEV_ZONED
@@ -206,35 +208,76 @@ struct dm_table {
 /*
  * One of these is allocated per clone bio.
  */
-#define DM_TIO_MAGIC 7282014
+#define DM_TIO_MAGIC 28714
 struct dm_target_io {
-	unsigned int magic;
+	unsigned short magic;
+	unsigned short flags;
+	unsigned int target_bio_nr;
 	struct dm_io *io;
 	struct dm_target *ti;
-	unsigned int target_bio_nr;
 	unsigned int *len_ptr;
-	bool inside_dm_io;
+	sector_t old_sector;
 	struct bio clone;
 };
 
+/*
+ * dm_target_io flags
+ */
+enum {
+	DM_TIO_INSIDE_DM_IO,
+	DM_TIO_IS_DUPLICATE_BIO
+};
+
+static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
+{
+	return (tio->flags & (1U << bit)) != 0;
+}
+
+static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
+{
+	tio->flags |= (1U << bit);
+}
+
 /*
  * One of these is allocated per original bio.
  * It contains the first clone used for that original.
  */
-#define DM_IO_MAGIC 5191977
+#define DM_IO_MAGIC 19577
 struct dm_io {
-	unsigned int magic;
-	struct mapped_device *md;
-	blk_status_t status;
+	unsigned short magic;
+	unsigned short flags;
 	atomic_t io_count;
+	struct mapped_device *md;
 	struct bio *orig_bio;
+	blk_status_t status;
+	spinlock_t lock;
 	unsigned long start_time;
-	spinlock_t endio_lock;
+	void *data;
+	struct hlist_node node;
+	struct task_struct *map_task;
 	struct dm_stats_aux stats_aux;
 	/* last member of dm_target_io is 'struct bio' */
 	struct dm_target_io tio;
 };
 
+/*
+ * dm_io flags
+ */
+enum {
+	DM_IO_START_ACCT,
+	DM_IO_ACCOUNTED
+};
+
+static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
+{
+	return (io->flags & (1U << bit)) != 0;
+}
+
+static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
+{
+	io->flags |= (1U << bit);
+}
+
 static inline void dm_io_inc_pending(struct dm_io *io)
 {
 	atomic_inc(&io->io_count);
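Shrinking both magic fields from unsigned int to unsigned short frees a 16-bit flags word in each struct at no size cost, which is part of how a dm_io (including the embedded dm_target_io and first clone bio) gets down to the 256 bytes mentioned in the merge message. The former booleans become bits manipulated through the helpers above, e.g. (sketch using only functions from this hunk):

	struct dm_target_io *tio = clone_to_tio(clone);

	dm_tio_set_flag(tio, DM_TIO_IS_DUPLICATE_BIO);

	if (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO)) {
		/* clone is the bio embedded in struct dm_io, not a later clone */
	}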
@@ -1827,6 +1827,8 @@ static void crypt_endio(struct bio *clone)
 	crypt_dec_pending(io);
 }
 
+#define CRYPT_MAP_READ_GFP GFP_NOWAIT
+
 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 {
 	struct crypt_config *cc = io->cc;
@@ -1854,7 +1856,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 		return 1;
 	}
 
-	submit_bio_noacct(clone);
+	dm_submit_bio_remap(io->base_bio, clone);
 	return 0;
 }
 
@@ -1880,7 +1882,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
 {
 	struct bio *clone = io->ctx.bio_out;
 
-	submit_bio_noacct(clone);
+	dm_submit_bio_remap(io->base_bio, clone);
 }
 
 #define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
@@ -1959,7 +1961,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 
 	if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
 	    test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
-		submit_bio_noacct(clone);
+		dm_submit_bio_remap(io->base_bio, clone);
 		return;
 	}
 
@@ -2578,7 +2580,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
 static int get_key_size(char **key_string)
 {
-	return (*key_string[0] == ':') ? -EINVAL : strlen(*key_string) >> 1;
+	return (*key_string[0] == ':') ? -EINVAL : (int)(strlen(*key_string) >> 1);
 }
 
 #endif /* CONFIG_KEYS */
@@ -3361,6 +3363,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	ti->num_flush_bios = 1;
 	ti->limit_swap_bios = true;
+	ti->accounts_remapped_io = true;
 
 	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
 	return 0;
@@ -3429,7 +3432,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	io->ctx.r.req = (struct skcipher_request *)(io + 1);
 
 	if (bio_data_dir(io->base_bio) == READ) {
-		if (kcryptd_io_read(io, GFP_NOWAIT))
+		if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
 			kcryptd_queue_read(io);
 	} else
 		kcryptd_queue_crypt(io);
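crypt_map() first attempts the read clone with CRYPT_MAP_READ_GFP (GFP_NOWAIT), since map context must not sleep; on allocation failure kcryptd_io_read() returns 1 and the read is bounced to the kcryptd workqueue, where a blocking allocation is safe. The control flow, condensed (both functions appear in the hunks above; retrying with GFP_NOIO in the worker is how such a fallback is commonly implemented):

	if (bio_data_dir(io->base_bio) == READ) {
		if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))	/* nowait attempt */
			kcryptd_queue_read(io);		/* worker retries, e.g. with GFP_NOIO */
	}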
@@ -3624,7 +3627,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 23, 0},
+	.version = {1, 24, 0},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
@@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio)
 	while (bio) {
 		n = bio->bi_next;
 		bio->bi_next = NULL;
-		submit_bio_noacct(bio);
+		dm_submit_bio_remap(bio, NULL);
 		bio = n;
 	}
 }
@@ -232,6 +232,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	ti->num_flush_bios = 1;
 	ti->num_discard_bios = 1;
+	ti->accounts_remapped_io = true;
 	ti->per_io_data_size = sizeof(struct dm_delay_info);
 	return 0;
 
@@ -355,7 +356,7 @@ static int delay_iterate_devices(struct dm_target *ti,
 
 static struct target_type delay_target = {
 	.name	     = "delay",
-	.version     = {1, 2, 1},
+	.version     = {1, 3, 0},
 	.features    = DM_TARGET_PASSES_INTEGRITY,
 	.module      = THIS_MODULE,
 	.ctr	     = delay_ctr,
@@ -455,7 +455,7 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
 		scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
 			  "%sname=%s,uuid=%s;device_resume=no_data;",
 			  DM_IMA_VERSION_STR, dev_name, dev_uuid);
-		l += strlen(device_table_data);
+		l = strlen(device_table_data);
 
 	}
 
@@ -568,7 +568,7 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
 		scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
 			  "%sname=%s,uuid=%s;device_remove=no_data;",
 			  DM_IMA_VERSION_STR, dev_name, dev_uuid);
-		l += strlen(device_table_data);
+		l = strlen(device_table_data);
 	}
 
 	memcpy(device_table_data + l, remove_all_str, remove_all_len);
@@ -654,7 +654,7 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
 		scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
 			  "%sname=%s,uuid=%s;table_clear=no_data;",
 			  DM_IMA_VERSION_STR, dev_name, dev_uuid);
-		l += strlen(device_table_data);
+		l = strlen(device_table_data);
 	}
 
 	capacity_len = strlen(capacity_str);
@@ -525,14 +525,6 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
 	return 0;
 }
 
-/*
- * New collapsed (a)synchronous interface.
- *
- * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set REQ_SYNC in
- * io_req->bi_opf. If you fail to do one of these, the IO will be submitted to
- * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
- */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	  struct dm_io_region *where, unsigned long *sync_error_bits)
 {
@@ -18,6 +18,7 @@
 #include <linux/dm-ioctl.h>
 #include <linux/hdreg.h>
 #include <linux/compat.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 #include <linux/ima.h>
@@ -1788,6 +1789,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
 	if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
 		return NULL;
 
+	cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
 	*ioctl_flags = _ioctls[cmd].flags;
 	return _ioctls[cmd].fn;
 }
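lookup_ioctl() indexes a function table with a user-controlled command number. The bounds check alone does not stop the CPU from speculatively executing the table load with an out-of-range index (Spectre v1), so the index is clamped after the check. The general pattern:

	#include <linux/nospec.h>

	if (cmd >= ARRAY_SIZE(_ioctls))
		return NULL;
	/* Clamp cmd so it stays in-bounds even under speculation */
	cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
	return _ioctls[cmd].fn;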
@@ -899,10 +899,7 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
 	if (m->hw_handler_name) {
 		r = scsi_dh_attach(q, m->hw_handler_name);
 		if (r == -EBUSY) {
-			char b[BDEVNAME_SIZE];
-
-			printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
-			       bdevname(bdev, b));
+			DMINFO("retaining handler on device %pg", bdev);
 			goto retain;
 		}
 		if (r < 0) {
@@ -491,8 +491,13 @@ static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (unlikely(!ti)) {
 		int srcu_idx;
-		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+		struct dm_table *map;
 
+		map = dm_get_live_table(md, &srcu_idx);
+		if (unlikely(!map)) {
+			dm_put_live_table(md, srcu_idx);
+			return BLK_STS_RESOURCE;
+		}
 		ti = dm_table_find_target(map, 0);
 		dm_put_live_table(md, srcu_idx);
 	}
@@ -195,6 +195,7 @@ void dm_stats_init(struct dm_stats *stats)
 
 	mutex_init(&stats->mutex);
 	INIT_LIST_HEAD(&stats->list);
+	stats->precise_timestamps = false;
 	stats->last = alloc_percpu(struct dm_stats_last_position);
 	for_each_possible_cpu(cpu) {
 		last = per_cpu_ptr(stats->last, cpu);
@@ -231,6 +232,22 @@ void dm_stats_cleanup(struct dm_stats *stats)
 	mutex_destroy(&stats->mutex);
 }
 
+static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
+{
+	struct list_head *l;
+	struct dm_stat *tmp_s;
+	bool precise_timestamps = false;
+
+	list_for_each(l, &stats->list) {
+		tmp_s = container_of(l, struct dm_stat, list_entry);
+		if (tmp_s->stat_flags & STAT_PRECISE_TIMESTAMPS) {
+			precise_timestamps = true;
+			break;
+		}
+	}
+	stats->precise_timestamps = precise_timestamps;
+}
+
 static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 			   sector_t step, unsigned stat_flags,
 			   unsigned n_histogram_entries,
|
||||||
}
|
}
|
||||||
ret_id = s->id;
|
ret_id = s->id;
|
||||||
list_add_tail_rcu(&s->list_entry, l);
|
list_add_tail_rcu(&s->list_entry, l);
|
||||||
|
|
||||||
|
dm_stats_recalc_precise_timestamps(stats);
|
||||||
|
|
||||||
mutex_unlock(&stats->mutex);
|
mutex_unlock(&stats->mutex);
|
||||||
|
|
||||||
resume_callback(md);
|
resume_callback(md);
|
||||||
|
@ -418,6 +438,9 @@ static int dm_stats_delete(struct dm_stats *stats, int id)
|
||||||
}
|
}
|
||||||
|
|
||||||
list_del_rcu(&s->list_entry);
|
list_del_rcu(&s->list_entry);
|
||||||
|
|
||||||
|
dm_stats_recalc_precise_timestamps(stats);
|
||||||
|
|
||||||
mutex_unlock(&stats->mutex);
|
mutex_unlock(&stats->mutex);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -621,13 +644,14 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
 
 void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
 			 sector_t bi_sector, unsigned bi_sectors, bool end,
-			 unsigned long duration_jiffies,
+			 unsigned long start_time,
 			 struct dm_stats_aux *stats_aux)
 {
 	struct dm_stat *s;
 	sector_t end_sector;
 	struct dm_stats_last_position *last;
 	bool got_precise_time;
+	unsigned long duration_jiffies = 0;
 
 	if (unlikely(!bi_sectors))
 		return;
@@ -647,16 +671,16 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
 		       ));
 		WRITE_ONCE(last->last_sector, end_sector);
 		WRITE_ONCE(last->last_rw, bi_rw);
-	}
+	} else
+		duration_jiffies = jiffies - start_time;
 
 	rcu_read_lock();
 
 	got_precise_time = false;
 	list_for_each_entry_rcu(s, &stats->list, list_entry) {
 		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
-			if (!end)
-				stats_aux->duration_ns = ktime_to_ns(ktime_get());
-			else
+			/* start (!end) duration_ns is set by DM core's alloc_io() */
+			if (end)
 				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
 			got_precise_time = true;
 		}
@@ -13,8 +13,7 @@ struct dm_stats {
 	struct mutex mutex;
 	struct list_head list;	/* list of struct dm_stat */
 	struct dm_stats_last_position __percpu *last;
-	sector_t last_sector;
-	unsigned last_rw;
+	bool precise_timestamps;
 };
 
 struct dm_stats_aux {
@@ -32,7 +31,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
 
 void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
 			 sector_t bi_sector, unsigned bi_sectors, bool end,
-			 unsigned long duration_jiffies,
+			 unsigned long start_time,
 			 struct dm_stats_aux *aux);
 
 static inline bool dm_stats_used(struct dm_stats *st)
@@ -40,4 +39,10 @@ static inline bool dm_stats_used(struct dm_stats *st)
 	return !list_empty(&st->list);
 }
 
+static inline void dm_stats_record_start(struct dm_stats *stats, struct dm_stats_aux *aux)
+{
+	if (unlikely(stats->precise_timestamps))
+		aux->duration_ns = ktime_to_ns(ktime_get());
+}
+
 #endif
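With the cached precise_timestamps flag, the expensive ktime_get() on the start side moves out of the per-IO stats-list walk and into IO allocation, and the end side only computes the delta. The pairing, as used by the hunks above:

	/* at IO start (DM core's alloc_io()): only touches ktime if any
	 * stat was created with the precise_timestamps option */
	dm_stats_record_start(&md->stats, &io->stats_aux);

	/* at IO end (dm_stats_account_io() with end == true) */
	stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;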
@@ -230,15 +230,14 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 	sector_t dev_size = bdev_nr_sectors(bdev);
 	unsigned short logical_block_size_sectors =
 		limits->logical_block_size >> SECTOR_SHIFT;
-	char b[BDEVNAME_SIZE];
 
 	if (!dev_size)
 		return 0;
 
 	if ((start >= dev_size) || (start + len > dev_size)) {
-		DMWARN("%s: %s too small for target: "
+		DMWARN("%s: %pg too small for target: "
 		       "start=%llu, len=%llu, dev_size=%llu",
-		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       dm_device_name(ti->table->md), bdev,
 		       (unsigned long long)start,
 		       (unsigned long long)len,
 		       (unsigned long long)dev_size);
@@ -253,10 +252,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		unsigned int zone_sectors = bdev_zone_sectors(bdev);
 
 		if (start & (zone_sectors - 1)) {
-			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
+			DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg",
 			       dm_device_name(ti->table->md),
 			       (unsigned long long)start,
-			       zone_sectors, bdevname(bdev, b));
+			       zone_sectors, bdev);
 			return 1;
 		}
 
@@ -270,10 +269,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 		 * the sector range.
		 */
		if (len & (zone_sectors - 1)) {
-			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
+			DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg",
			       dm_device_name(ti->table->md),
			       (unsigned long long)len,
-			       zone_sectors, bdevname(bdev, b));
+			       zone_sectors, bdev);
			return 1;
		}
	}
@@ -283,19 +282,19 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 
 	if (start & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: start=%llu not aligned to h/w "
-		       "logical block size %u of %s",
+		       "logical block size %u of %pg",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)start,
-		       limits->logical_block_size, bdevname(bdev, b));
+		       limits->logical_block_size, bdev);
 		return 1;
 	}
 
 	if (len & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: len=%llu not aligned to h/w "
-		       "logical block size %u of %s",
+		       "logical block size %u of %pg",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)len,
-		       limits->logical_block_size, bdevname(bdev, b));
+		       limits->logical_block_size, bdev);
 		return 1;
 	}
 
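All of these conversions rely on the %pg printk format extension, which prints a struct block_device's name directly and removes the need for a BDEVNAME_SIZE stack buffer plus bdevname() call:

	char b[BDEVNAME_SIZE];
	DMWARN("device %s too small", bdevname(bdev, b));	/* old pattern */

	DMWARN("device %pg too small", bdev);			/* new pattern */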
@@ -400,20 +399,19 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
 	struct request_queue *q = bdev_get_queue(bdev);
-	char b[BDEVNAME_SIZE];
 
 	if (unlikely(!q)) {
-		DMWARN("%s: Cannot set limits for nonexistent device %s",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
+		DMWARN("%s: Cannot set limits for nonexistent device %pg",
+		       dm_device_name(ti->table->md), bdev);
 		return 0;
 	}
 
 	if (blk_stack_limits(limits, &q->limits,
 			get_start_sect(bdev) + start) < 0)
-		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
+		DMWARN("%s: adding target device %pg caused an alignment inconsistency: "
 		       "physical_block_size=%u, logical_block_size=%u, "
 		       "alignment_offset=%u, start=%llu",
-		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       dm_device_name(ti->table->md), bdev,
 		       q->limits.physical_block_size,
 		       q->limits.logical_block_size,
 		       q->limits.alignment_offset,
@@ -1483,6 +1481,14 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
 	return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
+				   sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
+}
+
 /*
  * type->iterate_devices() should be called when the sanity check needs to
  * iterate and check all underlying data devices. iterate_devices() will
@@ -1533,6 +1539,11 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev,
 	return 0;
 }
 
+static int dm_table_supports_poll(struct dm_table *t)
+{
+	return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL);
+}
+
 /*
  * Check whether a table has no data devices attached using each
  * target's iterate_devices method.
@@ -2069,6 +2080,20 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 		dm_update_crypto_profile(q, t);
 	disk_update_readahead(t->md->disk);
 
+	/*
+	 * Check for request-based device is left to
+	 * dm_mq_init_request_queue()->blk_mq_init_allocated_queue().
+	 *
+	 * For bio-based device, only set QUEUE_FLAG_POLL when all
+	 * underlying devices supporting polling.
+	 */
+	if (__table_type_bio_based(t->type)) {
+		if (dm_table_supports_poll(t))
+			blk_queue_flag_set(QUEUE_FLAG_POLL, q);
+		else
+			blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
+	}
+
 	return 0;
 }
 
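dm_table_any_dev_attr() runs a per-device predicate over every underlying device of a table, so a table-wide capability is expressed as the negation of "any device lacks it", which is exactly how dm_table_supports_poll() is built above. An analogous check (illustrative, not from this diff) for nowait support would have the same shape:

	static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
					     sector_t start, sector_t len, void *data)
	{
		/* return nonzero when this device lacks the capability */
		return !blk_queue_nowait(bdev_get_queue(dev->bdev));
	}

	static bool example_table_supports_nowait(struct dm_table *t)
	{
		return !dm_table_any_dev_attr(t, device_not_nowait_capable, NULL);
	}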
@@ -1665,22 +1665,6 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
 	return r;
 }
 
-static int __remove(struct dm_thin_device *td, dm_block_t block)
-{
-	int r;
-	struct dm_pool_metadata *pmd = td->pmd;
-	dm_block_t keys[2] = { td->id, block };
-
-	r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
-	if (r)
-		return r;
-
-	td->mapped_blocks--;
-	td->changed = true;
-
-	return 0;
-}
-
 static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
 {
 	int r;
@@ -1740,18 +1724,6 @@ static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_
 	return dm_btree_insert(&pmd->tl_info, pmd->root, keys, &value, &pmd->root);
 }
 
-int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
-{
-	int r = -EINVAL;
-
-	pmd_write_lock(td->pmd);
-	if (!td->pmd->fail_io)
-		r = __remove(td, block);
-	pmd_write_unlock(td->pmd);
-
-	return r;
-}
-
 int dm_thin_remove_range(struct dm_thin_device *td,
 			 dm_block_t begin, dm_block_t end)
 {
@@ -166,7 +166,6 @@ int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result);
 int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
 			 dm_block_t data_block);
 
-int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
 int dm_thin_remove_range(struct dm_thin_device *td,
 			 dm_block_t begin, dm_block_t end);
 
@@ -161,7 +161,7 @@ static void throttle_work_start(struct throttle *t)
 
 static void throttle_work_update(struct throttle *t)
 {
-	if (!t->throttle_applied && jiffies > t->threshold) {
+	if (!t->throttle_applied && time_is_before_jiffies(t->threshold)) {
 		down_write(&t->lock);
 		t->throttle_applied = true;
 	}
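The old test "jiffies > t->threshold" gives the wrong answer once the jiffies counter wraps around. time_is_before_jiffies() is built on the time_after() idiom, which compares via signed subtraction and therefore stays correct across wraparound (simplified, omitting the typecheck):

	#define time_after(a, b)		((long)((b) - (a)) < 0)
	#define time_is_before_jiffies(a)	time_after(jiffies, a)

	/* the fixed check: has jiffies advanced past t->threshold? */
	if (!t->throttle_applied && time_is_before_jiffies(t->threshold)) {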
@@ -755,7 +755,7 @@ static void issue(struct thin_c *tc, struct bio *bio)
 	struct pool *pool = tc->pool;
 
 	if (!bio_triggers_commit(tc, bio)) {
-		submit_bio_noacct(bio);
+		dm_submit_bio_remap(bio, NULL);
 		return;
 	}
 
@@ -2383,7 +2383,7 @@ static void process_deferred_bios(struct pool *pool)
 			if (bio->bi_opf & REQ_PREFLUSH)
 				bio_endio(bio);
 			else
-				submit_bio_noacct(bio);
+				dm_submit_bio_remap(bio, NULL);
 		}
 	}
 
@@ -2824,7 +2824,6 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
 	struct block_device *data_bdev = pt->data_dev->bdev;
 	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
 	const char *reason = NULL;
-	char buf[BDEVNAME_SIZE];
 
 	if (!pt->adjusted_pf.discard_passdown)
 		return;
@@ -2836,7 +2835,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
 		reason = "max discard sectors smaller than a block";
 
 	if (reason) {
-		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
+		DMWARN("Data device (%pg) %s: Disabling discard passdown.", data_bdev, reason);
 		pt->adjusted_pf.discard_passdown = false;
 	}
 }
@@ -3201,11 +3200,10 @@ static sector_t get_dev_size(struct block_device *bdev)
 static void warn_if_metadata_device_too_big(struct block_device *bdev)
 {
 	sector_t metadata_dev_size = get_dev_size(bdev);
-	char buffer[BDEVNAME_SIZE];
 
 	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
-		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+		DMWARN("Metadata device %pg is larger than %u sectors: excess space will not be used.",
+		       bdev, THIN_METADATA_MAX_SECTORS);
 }
 
 static sector_t get_metadata_dev_size(struct block_device *bdev)
@@ -4233,6 +4231,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	ti->num_flush_bios = 1;
 	ti->flush_supported = true;
+	ti->accounts_remapped_io = true;
 	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
 
 	/* In case the pool supports discards, pass them on. */
@@ -1101,8 +1101,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
  */
 static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
 {
-	dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
-		      set, sb->dev->name, sb->block);
+	dmz_zmd_debug(zmd, "read superblock set %d dev %pg block %llu",
+		      set, sb->dev->bdev, sb->block);
 
 	return dmz_rdwr_block(sb->dev, REQ_OP_READ,
 			      sb->block, sb->mblk->page);
@@ -730,7 +730,6 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
 	}
 	dev->bdev = bdev;
 	dev->dev_idx = idx;
-	(void)bdevname(dev->bdev, dev->name);
 
 	dev->capacity = bdev_nr_sectors(bdev);
 	if (ti->begin) {
@@ -56,7 +56,6 @@ struct dmz_dev {
 	struct dmz_metadata	*metadata;
 	struct dmz_reclaim	*reclaim;
 
-	char			name[BDEVNAME_SIZE];
 	uuid_t			uuid;
 
 	sector_t		capacity;
|
||||||
* Message functions.
|
* Message functions.
|
||||||
*/
|
*/
|
||||||
#define dmz_dev_info(dev, format, args...) \
|
#define dmz_dev_info(dev, format, args...) \
|
||||||
DMINFO("(%s): " format, (dev)->name, ## args)
|
DMINFO("(%pg): " format, (dev)->bdev, ## args)
|
||||||
|
|
||||||
#define dmz_dev_err(dev, format, args...) \
|
#define dmz_dev_err(dev, format, args...) \
|
||||||
DMERR("(%s): " format, (dev)->name, ## args)
|
DMERR("(%pg): " format, (dev)->bdev, ## args)
|
||||||
|
|
||||||
#define dmz_dev_warn(dev, format, args...) \
|
#define dmz_dev_warn(dev, format, args...) \
|
||||||
DMWARN("(%s): " format, (dev)->name, ## args)
|
DMWARN("(%pg): " format, (dev)->bdev, ## args)
|
||||||
|
|
||||||
#define dmz_dev_debug(dev, format, args...) \
|
#define dmz_dev_debug(dev, format, args...) \
|
||||||
DMDEBUG("(%s): " format, (dev)->name, ## args)
|
DMDEBUG("(%pg): " format, (dev)->bdev, ## args)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Functions defined in dm-zoned-metadata.c
|
* Functions defined in dm-zoned-metadata.c
|
||||||
|
|
drivers/md/dm.c: 597 lines changed
@ -40,6 +40,13 @@
|
||||||
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
|
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
|
||||||
#define DM_COOKIE_LENGTH 24
|
#define DM_COOKIE_LENGTH 24
|
||||||
|
|
||||||
|
/*
|
||||||
|
* For REQ_POLLED fs bio, this flag is set if we link mapped underlying
|
||||||
|
* dm_io into one list, and reuse bio->bi_private as the list head. Before
|
||||||
|
* ending this fs bio, we will recover its ->bi_private.
|
||||||
|
*/
|
||||||
|
#define REQ_DM_POLL_LIST REQ_DRV
|
||||||
|
|
||||||
static const char *_name = DM_NAME;
|
static const char *_name = DM_NAME;
|
||||||
|
|
||||||
static unsigned int major = 0;
|
static unsigned int major = 0;
|
||||||
|
@ -73,6 +80,7 @@ struct clone_info {
|
||||||
struct dm_io *io;
|
struct dm_io *io;
|
||||||
sector_t sector;
|
sector_t sector;
|
||||||
unsigned sector_count;
|
unsigned sector_count;
|
||||||
|
bool submit_as_polled;
|
||||||
};
|
};
|
||||||
|
|
||||||
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
|
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
|
||||||
|
@ -86,7 +94,7 @@ static inline struct dm_target_io *clone_to_tio(struct bio *clone)
|
||||||
|
|
||||||
void *dm_per_bio_data(struct bio *bio, size_t data_size)
|
void *dm_per_bio_data(struct bio *bio, size_t data_size)
|
||||||
{
|
{
|
||||||
if (!clone_to_tio(bio)->inside_dm_io)
|
if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
|
||||||
return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
|
return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
|
||||||
return (char *)bio - DM_IO_BIO_OFFSET - data_size;
|
return (char *)bio - DM_IO_BIO_OFFSET - data_size;
|
||||||
}
|
}
|
||||||
|
@@ -485,33 +493,74 @@ u64 dm_start_time_ns_from_clone(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
 
-static void start_io_acct(struct dm_io *io)
+static bool bio_is_flush_with_data(struct bio *bio)
 {
-	struct mapped_device *md = io->md;
-	struct bio *bio = io->orig_bio;
-
-	bio_start_io_acct_time(bio, io->start_time);
-	if (unlikely(dm_stats_used(&md->stats)))
-		dm_stats_account_io(&md->stats, bio_data_dir(bio),
-				    bio->bi_iter.bi_sector, bio_sectors(bio),
-				    false, 0, &io->stats_aux);
+	return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
 }
 
-static void end_io_acct(struct mapped_device *md, struct bio *bio,
-			unsigned long start_time, struct dm_stats_aux *stats_aux)
+static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
+		       unsigned long start_time, struct dm_stats_aux *stats_aux)
 {
-	unsigned long duration = jiffies - start_time;
+	bool is_flush_with_data;
+	unsigned int bi_size;
+
+	/* If REQ_PREFLUSH set save any payload but do not account it */
+	is_flush_with_data = bio_is_flush_with_data(bio);
+	if (is_flush_with_data) {
+		bi_size = bio->bi_iter.bi_size;
+		bio->bi_iter.bi_size = 0;
+	}
 
-	bio_end_io_acct(bio, start_time);
+	if (!end)
+		bio_start_io_acct_time(bio, start_time);
+	else
+		bio_end_io_acct(bio, start_time);
 
 	if (unlikely(dm_stats_used(&md->stats)))
 		dm_stats_account_io(&md->stats, bio_data_dir(bio),
 				    bio->bi_iter.bi_sector, bio_sectors(bio),
-				    true, duration, stats_aux);
+				    end, start_time, stats_aux);
 
-	/* nudge anyone waiting on suspend queue */
-	if (unlikely(wq_has_sleeper(&md->wait)))
-		wake_up(&md->wait);
+	/* Restore bio's payload so it does get accounted upon requeue */
+	if (is_flush_with_data)
+		bio->bi_iter.bi_size = bi_size;
+}
+
+static void __dm_start_io_acct(struct dm_io *io, struct bio *bio)
+{
+	dm_io_acct(false, io->md, bio, io->start_time, &io->stats_aux);
+}
+
+static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
+{
+	/* Must account IO to DM device in terms of orig_bio */
+	struct bio *bio = io->orig_bio;
+
+	/*
+	 * Ensure IO accounting is only ever started once.
+	 * Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO.
+	 */
+	if (!clone ||
+	    likely(!dm_tio_flagged(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO))) {
+		if (WARN_ON_ONCE(dm_io_flagged(io, DM_IO_ACCOUNTED)))
+			return;
+		dm_io_set_flag(io, DM_IO_ACCOUNTED);
+	} else {
+		unsigned long flags;
+		if (dm_io_flagged(io, DM_IO_ACCOUNTED))
+			return;
+		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
+		spin_lock_irqsave(&io->lock, flags);
+		dm_io_set_flag(io, DM_IO_ACCOUNTED);
+		spin_unlock_irqrestore(&io->lock, flags);
+	}
+
+	__dm_start_io_acct(io, bio);
+}
+
+static void dm_end_io_acct(struct dm_io *io, struct bio *bio)
+{
+	dm_io_acct(true, io->md, bio, io->start_time, &io->stats_aux);
 }
 
 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
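The hunk above is the heart of the dm-crypt stats fix: a flush that carries data must be accounted as a zero-size preflush, so dm_io_acct() parks the payload size around the accounting calls and restores it afterwards (a requeued bio still needs its payload). Below is a minimal userspace sketch of that save/zero/restore pattern; every name in it (fake_bio, io_acct, account) is illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

struct fake_bio {
	unsigned int bi_size;	/* payload bytes, models bio->bi_iter.bi_size */
	bool preflush;		/* models the REQ_PREFLUSH flag */
};

static void account(const struct fake_bio *bio, const char *when)
{
	printf("%s: accounted %u bytes\n", when, bio->bi_size);
}

static void io_acct(struct fake_bio *bio, bool end)
{
	bool flush_with_data = bio->preflush && bio->bi_size;
	unsigned int saved = 0;

	if (flush_with_data) {		/* hide the payload from accounting */
		saved = bio->bi_size;
		bio->bi_size = 0;
	}
	account(bio, end ? "end" : "start");
	if (flush_with_data)		/* restore for a possible requeue */
		bio->bi_size = saved;
}

int main(void)
{
	struct fake_bio bio = { .bi_size = 4096, .preflush = true };

	io_acct(&bio, false);	/* start: accounted 0 bytes */
	io_acct(&bio, true);	/* end: accounted 0 bytes */
	printf("payload preserved: %u bytes\n", bio.bi_size);
	return 0;
}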
@@ -523,23 +572,28 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 	clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);
 
 	tio = clone_to_tio(clone);
-	tio->inside_dm_io = true;
+	tio->flags = 0;
+	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
 	tio->io = NULL;
 
 	io = container_of(tio, struct dm_io, tio);
 	io->magic = DM_IO_MAGIC;
 	io->status = 0;
 	atomic_set(&io->io_count, 1);
-	io->orig_bio = bio;
+	this_cpu_inc(*md->pending_io);
+	io->orig_bio = NULL;
 	io->md = md;
-	spin_lock_init(&io->endio_lock);
+	io->map_task = current;
+	spin_lock_init(&io->lock);
 	io->start_time = jiffies;
+	io->flags = 0;
+
+	dm_stats_record_start(&md->stats, &io->stats_aux);
 
 	return io;
 }
 
-static void free_io(struct mapped_device *md, struct dm_io *io)
+static void free_io(struct dm_io *io)
 {
 	bio_put(&io->tio.clone);
 }
@@ -548,18 +602,24 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 		     unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
 {
 	struct dm_target_io *tio;
+	struct bio *clone;
 
 	if (!ci->io->tio.io) {
 		/* the dm_target_io embedded in ci->io is available */
 		tio = &ci->io->tio;
+		/* alloc_io() already initialized embedded clone */
+		clone = &tio->clone;
 	} else {
-		struct bio *clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
-						    gfp_mask, &ci->io->md->bs);
+		clone = bio_alloc_clone(ci->bio->bi_bdev, ci->bio,
+					gfp_mask, &ci->io->md->bs);
 		if (!clone)
 			return NULL;
 
+		/* REQ_DM_POLL_LIST shouldn't be inherited */
+		clone->bi_opf &= ~REQ_DM_POLL_LIST;
+
 		tio = clone_to_tio(clone);
-		tio->inside_dm_io = false;
+		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
 	}
 
 	tio->magic = DM_TIO_MAGIC;
@@ -567,13 +627,20 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 	tio->ti = ti;
 	tio->target_bio_nr = target_bio_nr;
 	tio->len_ptr = len;
+	tio->old_sector = 0;
 
-	return &tio->clone;
+	if (len) {
+		clone->bi_iter.bi_size = to_bytes(*len);
+		if (bio_integrity(clone))
+			bio_integrity_trim(clone);
+	}
+
+	return clone;
 }
 
 static void free_tio(struct bio *clone)
 {
-	if (clone_to_tio(clone)->inside_dm_io)
+	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
 		return;
 	bio_put(clone);
 }
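The inside_dm_io boolean above is being folded, along with the duplicate-bio marker, into a single flags word; that conversion is what lets dm_target_io shrink per the "switch booleans over to proper flags" commits. A small userspace model of the accessor shape follows; the names mirror, but are not, the kernel's dm_tio_set_flag()/dm_tio_flagged().

#include <stdbool.h>
#include <stdio.h>

enum {
	TIO_INSIDE_DM_IO,	/* models DM_TIO_INSIDE_DM_IO */
	TIO_IS_DUPLICATE_BIO,	/* models DM_TIO_IS_DUPLICATE_BIO */
};

struct fake_tio {
	unsigned short flags;	/* one word replaces one bool per property */
};

static bool tio_flagged(const struct fake_tio *tio, int bit)
{
	return tio->flags & (1u << bit);
}

static void tio_set_flag(struct fake_tio *tio, int bit)
{
	tio->flags |= (1u << bit);
}

int main(void)
{
	struct fake_tio tio = { .flags = 0 };	/* clears every flag at once */

	tio_set_flag(&tio, TIO_INSIDE_DM_IO);
	printf("inside_dm_io=%d duplicate=%d\n",
	       tio_flagged(&tio, TIO_INSIDE_DM_IO),
	       tio_flagged(&tio, TIO_IS_DUPLICATE_BIO));
	return 0;
}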
@@ -780,30 +847,14 @@ static int __noflush_suspending(struct mapped_device *md)
 	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 }
 
-/*
- * Decrements the number of outstanding ios that a bio has been
- * cloned into, completing the original io if necc.
- */
-void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
+static void dm_io_complete(struct dm_io *io)
 {
-	unsigned long flags;
 	blk_status_t io_error;
-	struct bio *bio;
 	struct mapped_device *md = io->md;
-	unsigned long start_time = 0;
-	struct dm_stats_aux stats_aux;
-
-	/* Push-back supersedes any I/O errors */
-	if (unlikely(error)) {
-		spin_lock_irqsave(&io->endio_lock, flags);
-		if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md)))
-			io->status = error;
-		spin_unlock_irqrestore(&io->endio_lock, flags);
-	}
+	struct bio *bio = io->orig_bio;
 
-	if (atomic_dec_and_test(&io->io_count)) {
-		bio = io->orig_bio;
-		if (io->status == BLK_STS_DM_REQUEUE) {
+	if (io->status == BLK_STS_DM_REQUEUE) {
+		unsigned long flags;
 		/*
 		 * Target requested pushing back the I/O.
 		 */
@@ -823,15 +874,35 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
 	}
 
 	io_error = io->status;
-	start_time = io->start_time;
-	stats_aux = io->stats_aux;
-	free_io(md, io);
-	end_io_acct(md, bio, start_time, &stats_aux);
+	if (dm_io_flagged(io, DM_IO_ACCOUNTED))
+		dm_end_io_acct(io, bio);
+	else if (!io_error) {
+		/*
+		 * Must handle target that DM_MAPIO_SUBMITTED only to
+		 * then bio_endio() rather than dm_submit_bio_remap()
+		 */
+		__dm_start_io_acct(io, bio);
+		dm_end_io_acct(io, bio);
+	}
+	free_io(io);
+	smp_wmb();
+	this_cpu_dec(*md->pending_io);
 
-	if (io_error == BLK_STS_DM_REQUEUE)
+	/* nudge anyone waiting on suspend queue */
+	if (unlikely(wq_has_sleeper(&md->wait)))
+		wake_up(&md->wait);
+
+	if (io_error == BLK_STS_DM_REQUEUE) {
+		/*
+		 * Upper layer won't help us poll split bio, io->orig_bio
+		 * may only reflect a subset of the pre-split original,
+		 * so clear REQ_POLLED in case of requeue
+		 */
+		bio->bi_opf &= ~REQ_POLLED;
 		return;
+	}
 
-	if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
+	if (bio_is_flush_with_data(bio)) {
 		/*
 		 * Preflush done for flush with data, reissue
 		 * without REQ_PREFLUSH.
@@ -845,6 +916,31 @@ void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
 		bio_endio(bio);
 	}
 }
 
+static inline bool dm_tio_is_normal(struct dm_target_io *tio)
+{
+	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
+		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
+}
+
+/*
+ * Decrements the number of outstanding ios that a bio has been
+ * cloned into, completing the original io if necc.
+ */
+void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
+{
+	/* Push-back supersedes any I/O errors */
+	if (unlikely(error)) {
+		unsigned long flags;
+		spin_lock_irqsave(&io->lock, flags);
+		if (!(io->status == BLK_STS_DM_REQUEUE &&
+		      __noflush_suspending(io->md)))
+			io->status = error;
+		spin_unlock_irqrestore(&io->lock, flags);
+	}
+
+	if (atomic_dec_and_test(&io->io_count))
+		dm_io_complete(io);
+}
+
 void disable_discard(struct mapped_device *md)
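The slimmed-down dm_io_dec_pending() above keeps the old contract: errors are recorded under io->lock (a push-back status must not be overwritten during no-flush suspend), and completion runs exactly once, when the last clone reference drops. A simplified userspace model with C11 atomics and a pthread mutex; "first error wins" here stands in for the kernel's more nuanced BLK_STS_DM_REQUEUE handling, and all names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_io {
	atomic_int io_count;	/* models dm_io->io_count */
	pthread_mutex_t lock;	/* models dm_io->lock */
	int status;		/* first recorded error wins */
};

static void io_complete(struct fake_io *io)
{
	printf("complete, status=%d\n", io->status);
}

static void io_dec_pending(struct fake_io *io, int error)
{
	if (error) {
		pthread_mutex_lock(&io->lock);
		if (!io->status)
			io->status = error;
		pthread_mutex_unlock(&io->lock);
	}
	if (atomic_fetch_sub(&io->io_count, 1) == 1)
		io_complete(io);	/* last reference dropped */
}

int main(void)
{
	struct fake_io io;

	atomic_init(&io.io_count, 3);	/* one ref per clone plus one extra */
	io.status = 0;
	pthread_mutex_init(&io.lock, NULL);

	io_dec_pending(&io, 0);
	io_dec_pending(&io, 5);		/* records an EIO-like error */
	io_dec_pending(&io, 0);		/* completes exactly once */
	pthread_mutex_destroy(&io.lock);
	return 0;
}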
@@ -1058,7 +1154,8 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 /*
  * A target may call dm_accept_partial_bio only from the map routine. It is
  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
- * operations and REQ_OP_ZONE_APPEND (zone append writes).
+ * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
+ * __send_duplicate_bios().
  *
  * dm_accept_partial_bio informs the dm that the target only wants to process
  * additional n_sectors sectors of the bio and the rest of the data should be
@@ -1089,7 +1186,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 	struct dm_target_io *tio = clone_to_tio(bio);
 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
 
-	BUG_ON(bio->bi_opf & REQ_PREFLUSH);
+	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
 	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
 	BUG_ON(bi_size > *tio->len_ptr);
|
@ -1100,6 +1197,56 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
|
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
|
||||||
|
|
||||||
|
static inline void __dm_submit_bio_remap(struct bio *clone,
|
||||||
|
dev_t dev, sector_t old_sector)
|
||||||
|
{
|
||||||
|
trace_block_bio_remap(clone, dev, old_sector);
|
||||||
|
submit_bio_noacct(clone);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* @clone: clone bio that DM core passed to target's .map function
|
||||||
|
* @tgt_clone: clone of @clone bio that target needs submitted
|
||||||
|
*
|
||||||
|
* Targets should use this interface to submit bios they take
|
||||||
|
* ownership of when returning DM_MAPIO_SUBMITTED.
|
||||||
|
*
|
||||||
|
* Target should also enable ti->accounts_remapped_io
|
||||||
|
*/
|
||||||
|
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
|
||||||
|
{
|
||||||
|
struct dm_target_io *tio = clone_to_tio(clone);
|
||||||
|
struct dm_io *io = tio->io;
|
||||||
|
|
||||||
|
WARN_ON_ONCE(!tio->ti->accounts_remapped_io);
|
||||||
|
|
||||||
|
/* establish bio that will get submitted */
|
||||||
|
if (!tgt_clone)
|
||||||
|
tgt_clone = clone;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Account io->origin_bio to DM dev on behalf of target
|
||||||
|
* that took ownership of IO with DM_MAPIO_SUBMITTED.
|
||||||
|
*/
|
||||||
|
if (io->map_task == current) {
|
||||||
|
/* Still in target's map function */
|
||||||
|
dm_io_set_flag(io, DM_IO_START_ACCT);
|
||||||
|
} else {
|
||||||
|
/*
|
||||||
|
* Called by another thread, managed by DM target,
|
||||||
|
* wait for dm_split_and_process_bio() to store
|
||||||
|
* io->orig_bio
|
||||||
|
*/
|
||||||
|
while (unlikely(!smp_load_acquire(&io->orig_bio)))
|
||||||
|
msleep(1);
|
||||||
|
dm_start_io_acct(io, clone);
|
||||||
|
}
|
||||||
|
|
||||||
|
__dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk),
|
||||||
|
tio->old_sector);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
|
||||||
|
|
||||||
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
|
static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
|
||||||
{
|
{
|
||||||
mutex_lock(&md->swap_bios_lock);
|
mutex_lock(&md->swap_bios_lock);
|
||||||
|
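dm_submit_bio_remap() above is the interface the dm-crypt, dm-thin and dm-cache conversions in this series adopt. The sketch below shows the calling convention for a hypothetical bio-based target that defers submission to its own workqueue; example_c, example_work and the surrounding ctr/dtr plumbing are invented, while dm_submit_bio_remap(), DM_MAPIO_SUBMITTED and ti->accounts_remapped_io are the real API.

#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_c {			/* hypothetical per-target context */
	struct dm_dev *dev;
	struct workqueue_struct *wq;
};

struct example_work {
	struct work_struct work;
	struct bio *bio;
};

static void example_do_work(struct work_struct *work)
{
	struct example_work *w = container_of(work, struct example_work, work);

	/*
	 * Deferred submission: DM core starts accounting here, not at
	 * ->map() time, which is what makes the IO stats accurate.
	 * NULL means "submit the clone itself, no further sub-clone".
	 */
	dm_submit_bio_remap(w->bio, NULL);
	kfree(w);
}

/* The target's ctr must also set ti->accounts_remapped_io = true. */
static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_c *ec = ti->private;
	struct example_work *w = kmalloc(sizeof(*w), GFP_NOIO);

	if (!w)
		return DM_MAPIO_KILL;

	bio_set_dev(bio, ec->dev->bdev);
	INIT_WORK(&w->work, example_do_work);
	w->bio = bio;
	queue_work(ec->wq, &w->work);

	/* Target now owns the bio and will submit it from the worker. */
	return DM_MAPIO_SUBMITTED;
}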
@@ -1120,19 +1267,16 @@ static void __map_bio(struct bio *clone)
 {
 	struct dm_target_io *tio = clone_to_tio(clone);
 	int r;
-	sector_t sector;
 	struct dm_io *io = tio->io;
 	struct dm_target *ti = tio->ti;
 
 	clone->bi_end_io = clone_endio;
 
 	/*
-	 * Map the clone. If r == 0 we don't need to do
-	 * anything, the target has assumed ownership of
-	 * this io.
+	 * Map the clone.
 	 */
 	dm_io_inc_pending(io);
-	sector = clone->bi_iter.bi_sector;
+	tio->old_sector = clone->bi_iter.bi_sector;
 
 	if (unlikely(swap_bios_limit(ti, clone))) {
 		struct mapped_device *md = io->md;
@@ -1154,26 +1298,27 @@ static void __map_bio(struct bio *clone)
 
 	switch (r) {
 	case DM_MAPIO_SUBMITTED:
+		/* target has assumed ownership of this io */
+		if (!ti->accounts_remapped_io)
+			dm_io_set_flag(io, DM_IO_START_ACCT);
 		break;
 	case DM_MAPIO_REMAPPED:
-		/* the bio has been remapped so dispatch it */
-		trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
-		submit_bio_noacct(clone);
+		/*
+		 * the bio has been remapped so dispatch it, but defer
+		 * dm_start_io_acct() until after possible bio_split().
+		 */
+		__dm_submit_bio_remap(clone, disk_devt(io->md->disk),
+				      tio->old_sector);
+		dm_io_set_flag(io, DM_IO_START_ACCT);
 		break;
 	case DM_MAPIO_KILL:
-		if (unlikely(swap_bios_limit(ti, clone))) {
-			struct mapped_device *md = io->md;
-			up(&md->swap_bios_semaphore);
-		}
-		free_tio(clone);
-		dm_io_dec_pending(io, BLK_STS_IOERR);
-		break;
 	case DM_MAPIO_REQUEUE:
-		if (unlikely(swap_bios_limit(ti, clone))) {
-			struct mapped_device *md = io->md;
-			up(&md->swap_bios_semaphore);
-		}
+		if (unlikely(swap_bios_limit(ti, clone)))
+			up(&io->md->swap_bios_semaphore);
 		free_tio(clone);
-		dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
+		if (r == DM_MAPIO_KILL)
+			dm_io_dec_pending(io, BLK_STS_IOERR);
+		else
+			dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
 		break;
 	default:
@@ -1182,31 +1327,6 @@ static void __map_bio(struct bio *clone)
 	}
 }
 
-static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
-{
-	bio->bi_iter.bi_sector = sector;
-	bio->bi_iter.bi_size = to_bytes(len);
-}
-
-/*
- * Creates a bio that consists of range of complete bvecs.
- */
-static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
-				    sector_t sector, unsigned *len)
-{
-	struct bio *bio = ci->bio, *clone;
-
-	clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
-	clone->bi_iter.bi_size = to_bytes(*len);
-
-	if (bio_integrity(bio))
-		bio_integrity_trim(clone);
-
-	__map_bio(clone);
-	return 0;
-}
-
 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
 				struct dm_target *ti, unsigned num_bios,
 				unsigned *len)
@@ -1248,22 +1368,20 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
 		break;
 	case 1:
 		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-		if (len)
-			bio_setup_sector(clone, ci->sector, *len);
+		dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
 		__map_bio(clone);
 		break;
 	default:
 		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
 		while ((clone = bio_list_pop(&blist))) {
-			if (len)
-				bio_setup_sector(clone, ci->sector, *len);
+			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
 			__map_bio(clone);
 		}
 		break;
 	}
 }
 
-static int __send_empty_flush(struct clone_info *ci)
+static void __send_empty_flush(struct clone_info *ci)
 {
 	unsigned target_nr = 0;
 	struct dm_target *ti;
@@ -1280,37 +1398,28 @@ static int __send_empty_flush(struct clone_info *ci)
 	ci->bio = &flush_bio;
 	ci->sector_count = 0;
 
-	BUG_ON(bio_has_data(ci->bio));
 	while ((ti = dm_table_get_target(ci->map, target_nr++)))
 		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
 
 	bio_uninit(ci->bio);
-	return 0;
 }
 
-static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
-				       unsigned num_bios)
+static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
+					unsigned num_bios)
 {
 	unsigned len;
 
-	/*
-	 * Even though the device advertised support for this type of
-	 * request, that does not mean every target supports it, and
-	 * reconfiguration might also have changed that since the
-	 * check was performed.
-	 */
-	if (!num_bios)
-		return -EOPNOTSUPP;
-
 	len = min_t(sector_t, ci->sector_count,
 		    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
 
-	__send_duplicate_bios(ci, ti, num_bios, &len);
+	/*
+	 * dm_accept_partial_bio cannot be used with duplicate bios,
+	 * so update clone_info cursor before __send_duplicate_bios().
+	 */
 	ci->sector += len;
 	ci->sector_count -= len;
-
-	return 0;
+	__send_duplicate_bios(ci, ti, num_bios, &len);
 }
 
 static bool is_abnormal_io(struct bio *bio)
@@ -1332,10 +1441,9 @@ static bool is_abnormal_io(struct bio *bio)
 static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 				  int *result)
 {
-	struct bio *bio = ci->bio;
 	unsigned num_bios = 0;
 
-	switch (bio_op(bio)) {
+	switch (bio_op(ci->bio)) {
 	case REQ_OP_DISCARD:
 		num_bios = ti->num_discard_bios;
 		break;
@@ -1352,15 +1460,68 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
 		return false;
 	}
 
-	*result = __send_changing_extent_only(ci, ti, num_bios);
+	/*
+	 * Even though the device advertised support for this type of
+	 * request, that does not mean every target supports it, and
+	 * reconfiguration might also have changed that since the
+	 * check was performed.
+	 */
+	if (!num_bios)
+		*result = -EOPNOTSUPP;
+	else {
+		__send_changing_extent_only(ci, ti, num_bios);
+		*result = 0;
+	}
 	return true;
 }
 
+/*
+ * Reuse ->bi_private as hlist head for storing all dm_io instances
+ * associated with this bio, and this bio's bi_private needs to be
+ * stored in dm_io->data before the reuse.
+ *
+ * bio->bi_private is owned by fs or upper layer, so block layer won't
+ * touch it after splitting. Meantime it won't be changed by anyone after
+ * bio is submitted. So this reuse is safe.
+ */
+static inline struct hlist_head *dm_get_bio_hlist_head(struct bio *bio)
+{
+	return (struct hlist_head *)&bio->bi_private;
+}
+
+static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
+{
+	struct hlist_head *head = dm_get_bio_hlist_head(bio);
+
+	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
+		bio->bi_opf |= REQ_DM_POLL_LIST;
+		/*
+		 * Save .bi_private into dm_io, so that we can reuse
+		 * .bi_private as hlist head for storing dm_io list
+		 */
+		io->data = bio->bi_private;
+
+		INIT_HLIST_HEAD(head);
+
+		/* tell block layer to poll for completion */
+		bio->bi_cookie = ~BLK_QC_T_NONE;
+	} else {
+		/*
+		 * bio recursed due to split, reuse original poll list,
+		 * and save bio->bi_private too.
+		 */
+		io->data = hlist_entry(head->first, struct dm_io, node)->data;
+	}
+
+	hlist_add_head(&io->node, head);
+}
+
 /*
  * Select the correct strategy for processing a non-flush bio.
  */
-static int __split_and_process_non_flush(struct clone_info *ci)
+static int __split_and_process_bio(struct clone_info *ci)
 {
+	struct bio *clone;
 	struct dm_target *ti;
 	unsigned len;
 	int r;
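The bi_private trick above deserves a closer look: one pointer-sized field serves as a list head while the bio is owned by DM, and the submitter's original pointer is parked in the first dm_io's ->data so it can be put back before completion. A userspace model of the same pointer-parking idea follows, using a plain singly linked list and invented names.

#include <stdbool.h>
#include <stdio.h>

struct fake_io {
	struct fake_io *next;	/* models dm_io->node */
	void *data;		/* models dm_io->data (parked bi_private) */
};

struct fake_bio {
	void *bi_private;	/* owned by the submitter */
	bool poll_list;		/* models the REQ_DM_POLL_LIST flag */
};

static void queue_poll_io(struct fake_bio *bio, struct fake_io *io)
{
	struct fake_io **head = (struct fake_io **)&bio->bi_private;

	if (!bio->poll_list) {
		bio->poll_list = true;
		io->data = bio->bi_private;	/* park the caller's pointer */
		*head = NULL;			/* field is now a list head */
	} else {
		io->data = (*head)->data;	/* every node carries a copy */
	}
	io->next = *head;			/* push-front, like hlist_add_head */
	*head = io;
}

static void *restore_private(struct fake_bio *bio)
{
	struct fake_io *first = bio->bi_private;

	bio->poll_list = false;
	bio->bi_private = first->data;		/* hand the pointer back */
	return bio->bi_private;
}

int main(void)
{
	struct fake_bio bio = { .bi_private = "fs cookie", .poll_list = false };
	struct fake_io a = {0}, b = {0};

	queue_poll_io(&bio, &a);
	queue_poll_io(&bio, &b);
	printf("restored: %s\n", (char *)restore_private(&bio));
	return 0;
}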
@@ -1372,11 +1533,15 @@ static int __split_and_process_bio(struct clone_info *ci)
 	if (__process_abnormal_io(ci, ti, &r))
 		return r;
 
-	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
+	/*
+	 * Only support bio polling for normal IO, and the target io is
+	 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
+	 */
+	ci->submit_as_polled = ci->bio->bi_opf & REQ_POLLED;
 
-	r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
-	if (r < 0)
-		return r;
+	len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
+	clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
+	__map_bio(clone);
 
 	ci->sector += len;
 	ci->sector_count -= len;
@@ -1389,53 +1554,69 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
 {
 	ci->map = map;
 	ci->io = alloc_io(md, bio);
+	ci->bio = bio;
+	ci->submit_as_polled = false;
 	ci->sector = bio->bi_iter.bi_sector;
+	ci->sector_count = bio_sectors(bio);
+
+	/* Shouldn't happen but sector_count was being set to 0 so... */
+	if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
+		ci->sector_count = 0;
 }
 
 /*
  * Entry point to split a bio into clones and submit them to the targets.
  */
-static void __split_and_process_bio(struct mapped_device *md,
-				    struct dm_table *map, struct bio *bio)
+static void dm_split_and_process_bio(struct mapped_device *md,
+				     struct dm_table *map, struct bio *bio)
 {
 	struct clone_info ci;
+	struct bio *orig_bio = NULL;
 	int error = 0;
 
 	init_clone_info(&ci, md, map, bio);
 
 	if (bio->bi_opf & REQ_PREFLUSH) {
-		error = __send_empty_flush(&ci);
-		/* dm_io_dec_pending submits any data associated with flush */
-	} else if (op_is_zone_mgmt(bio_op(bio))) {
-		ci.bio = bio;
-		ci.sector_count = 0;
-		error = __split_and_process_non_flush(&ci);
-	} else {
-		ci.bio = bio;
-		ci.sector_count = bio_sectors(bio);
-		error = __split_and_process_non_flush(&ci);
-		if (ci.sector_count && !error) {
-			/*
-			 * Remainder must be passed to submit_bio_noacct()
-			 * so that it gets handled *after* bios already submitted
-			 * have been completely processed.
-			 * We take a clone of the original to store in
-			 * ci.io->orig_bio to be used by end_io_acct() and
-			 * for dec_pending to use for completion handling.
-			 */
-			struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
-						  GFP_NOIO, &md->queue->bio_split);
-			ci.io->orig_bio = b;
-			bio_chain(b, bio);
-			trace_block_split(b, bio->bi_iter.bi_sector);
-			submit_bio_noacct(bio);
-		}
+		__send_empty_flush(&ci);
+		/* dm_io_complete submits any data associated with flush */
+		goto out;
 	}
-	start_io_acct(ci.io);
 
-	/* drop the extra reference count */
-	dm_io_dec_pending(ci.io, errno_to_blk_status(error));
+	error = __split_and_process_bio(&ci);
+	ci.io->map_task = NULL;
+	if (error || !ci.sector_count)
+		goto out;
+
+	/*
+	 * Remainder must be passed to submit_bio_noacct() so it gets handled
+	 * *after* bios already submitted have been completely processed.
+	 * We take a clone of the original to store in ci.io->orig_bio to be
+	 * used by dm_end_io_acct() and for dm_io_complete() to use for
+	 * completion handling.
+	 */
+	orig_bio = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+			     GFP_NOIO, &md->queue->bio_split);
+	bio_chain(orig_bio, bio);
+	trace_block_split(orig_bio, bio->bi_iter.bi_sector);
+	submit_bio_noacct(bio);
+out:
+	if (!orig_bio)
+		orig_bio = bio;
+	smp_store_release(&ci.io->orig_bio, orig_bio);
+	if (dm_io_flagged(ci.io, DM_IO_START_ACCT))
+		dm_start_io_acct(ci.io, NULL);
+
+	/*
+	 * Drop the extra reference count for non-POLLED bio, and hold one
+	 * reference for POLLED bio, which will be released in dm_poll_bio
+	 *
+	 * Add every dm_io instance into the hlist_head which is stored in
+	 * bio->bi_private, so that dm_poll_bio can poll them all.
+	 */
+	if (error || !ci.submit_as_polled)
+		dm_io_dec_pending(ci.io, errno_to_blk_status(error));
+	else
+		dm_queue_poll_io(bio, ci.io);
 }
 
 static void dm_submit_bio(struct bio *bio)
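The smp_store_release() of io->orig_bio above pairs with the smp_load_acquire() spin in dm_submit_bio_remap() shown earlier: a target's worker may run before the submission path has finished splitting, so it must wait for the post-split original to be published. A userspace model of that publish/consume handshake using C11 acquire/release; the names are illustrative (build with -lpthread).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic(const char *) orig_bio;	/* NULL until published */

static void *remap_thread(void *arg)
{
	const char *bio;

	/* models: while (!smp_load_acquire(&io->orig_bio)) msleep(1); */
	while (!(bio = atomic_load_explicit(&orig_bio, memory_order_acquire)))
		usleep(1000);
	printf("worker saw: %s\n", bio);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, remap_thread, NULL);
	usleep(5000);	/* submission path finishes splitting first */
	/* models: smp_store_release(&ci.io->orig_bio, orig_bio); */
	atomic_store_explicit(&orig_bio, "post-split original",
			      memory_order_release);
	pthread_join(t, NULL);
	return 0;
}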
@@ -1445,15 +1626,10 @@ static void dm_submit_bio(struct bio *bio)
 	struct dm_table *map;
 
 	map = dm_get_live_table(md, &srcu_idx);
-	if (unlikely(!map)) {
-		DMERR_LIMIT("%s: mapping table unavailable, erroring io",
-			    dm_device_name(md));
-		bio_io_error(bio);
-		goto out;
-	}
 
-	/* If suspended, queue this IO for later */
-	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
+	/* If suspended, or map not yet available, queue this IO for later */
+	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) ||
+	    unlikely(!map)) {
 		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
 		else if (bio->bi_opf & REQ_RAHEAD)
@@ -1470,11 +1646,72 @@ static void dm_submit_bio(struct bio *bio)
 	if (is_abnormal_io(bio))
 		blk_queue_split(&bio);
 
-	__split_and_process_bio(md, map, bio);
+	dm_split_and_process_bio(md, map, bio);
 out:
 	dm_put_live_table(md, srcu_idx);
 }
 
+static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
+			  unsigned int flags)
+{
+	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
+
+	/* don't poll if the mapped io is done */
+	if (atomic_read(&io->io_count) > 1)
+		bio_poll(&io->tio.clone, iob, flags);
+
+	/* bio_poll holds the last reference */
+	return atomic_read(&io->io_count) == 1;
+}
+
+static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
+		       unsigned int flags)
+{
+	struct hlist_head *head = dm_get_bio_hlist_head(bio);
+	struct hlist_head tmp = HLIST_HEAD_INIT;
+	struct hlist_node *next;
+	struct dm_io *io;
+
+	/* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
+	if (!(bio->bi_opf & REQ_DM_POLL_LIST))
+		return 0;
+
+	WARN_ON_ONCE(hlist_empty(head));
+
+	hlist_move_list(head, &tmp);
+
+	/*
+	 * Restore .bi_private before possibly completing dm_io.
+	 *
+	 * bio_poll() is only possible once @bio has been completely
+	 * submitted via submit_bio_noacct()'s depth-first submission.
+	 * So there is no dm_queue_poll_io() race associated with
+	 * clearing REQ_DM_POLL_LIST here.
+	 */
+	bio->bi_opf &= ~REQ_DM_POLL_LIST;
+	bio->bi_private = hlist_entry(tmp.first, struct dm_io, node)->data;
+
+	hlist_for_each_entry_safe(io, next, &tmp, node) {
+		if (dm_poll_dm_io(io, iob, flags)) {
+			hlist_del_init(&io->node);
+			/*
+			 * clone_endio() has already occurred, so passing
+			 * error as 0 here doesn't override io->status
+			 */
+			dm_io_dec_pending(io, 0);
+		}
+	}
+
+	/* Not done? */
+	if (!hlist_empty(&tmp)) {
+		bio->bi_opf |= REQ_DM_POLL_LIST;
+		/* Reset bio->bi_private to dm_io list head */
+		hlist_move_list(&tmp, head);
+		return 0;
+	}
+	return 1;
+}
+
 /*-----------------------------------------------------------------
  * An IDR is used to keep track of allocated minor numbers.
  *---------------------------------------------------------------*/
@@ -1557,6 +1794,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
 		md->dax_dev = NULL;
 	}
 
+	dm_cleanup_zoned_dev(md);
 	if (md->disk) {
 		spin_lock(&_minor_lock);
 		md->disk->private_data = NULL;
@@ -1569,6 +1807,11 @@ static void cleanup_mapped_device(struct mapped_device *md)
 		blk_cleanup_disk(md->disk);
 	}
 
+	if (md->pending_io) {
+		free_percpu(md->pending_io);
+		md->pending_io = NULL;
+	}
+
 	cleanup_srcu_struct(&md->io_barrier);
 
 	mutex_destroy(&md->suspend_lock);
@@ -1577,7 +1820,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
 	mutex_destroy(&md->swap_bios_lock);
 
 	dm_mq_cleanup_mapped_device(md);
-	dm_cleanup_zoned_dev(md);
 }
 
 /*
@@ -1671,6 +1913,10 @@ static struct mapped_device *alloc_dev(int minor)
 	if (!md->wq)
 		goto bad;
 
+	md->pending_io = alloc_percpu(unsigned long);
+	if (!md->pending_io)
+		goto bad;
+
 	dm_stats_init(&md->stats);
 
 	/* Populate the mapping, nobody knows we exist yet */
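md->pending_io above replaces the generic part in_flight counters for DM's suspend logic: each submission and completion touches only its own CPU's slot without atomics, and only the rare "is the device idle?" check pays for a full sum across CPUs. A userspace model of the per-CPU counter shape, with a fixed fake CPU count and invented names.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static unsigned long pending_io[NR_CPUS];	/* models alloc_percpu() */

static void inc_pending(int cpu)	/* models this_cpu_inc() */
{
	pending_io[cpu]++;
}

static void dec_pending(int cpu)	/* models this_cpu_dec() */
{
	pending_io[cpu]--;
}

static bool in_flight(void)		/* models dm_in_flight_bios() */
{
	unsigned long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += pending_io[cpu];
	return sum != 0;
}

int main(void)
{
	inc_pending(0);
	inc_pending(2);
	dec_pending(0);
	printf("in flight: %s\n", in_flight() ? "yes" : "no");
	dec_pending(2);
	printf("in flight: %s\n", in_flight() ? "yes" : "no");
	return 0;
}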
@@ -1780,8 +2026,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 			       struct queue_limits *limits)
 {
 	struct dm_table *old_map;
-	struct request_queue *q = md->queue;
-	bool request_based = dm_table_request_based(t);
 	sector_t size;
 	int ret;
|
@ -1802,7 +2046,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
|
||||||
|
|
||||||
dm_table_event_callback(t, event_callback, md);
|
dm_table_event_callback(t, event_callback, md);
|
||||||
|
|
||||||
if (request_based) {
|
if (dm_table_request_based(t)) {
|
||||||
/*
|
/*
|
||||||
* Leverage the fact that request-based DM targets are
|
* Leverage the fact that request-based DM targets are
|
||||||
* immutable singletons - used to optimize dm_mq_queue_rq.
|
* immutable singletons - used to optimize dm_mq_queue_rq.
|
||||||
|
@@ -1816,7 +2060,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 		goto out;
 	}
 
-	ret = dm_table_set_restrictions(t, q, limits);
+	ret = dm_table_set_restrictions(t, md->queue, limits);
 	if (ret) {
 		old_map = ERR_PTR(ret);
 		goto out;
@@ -1828,7 +2072,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 
 	if (old_map)
 		dm_sync_table(md);
-
 out:
 	return old_map;
 }
@@ -2078,16 +2321,13 @@ void dm_put(struct mapped_device *md)
 }
 EXPORT_SYMBOL_GPL(dm_put);
 
-static bool md_in_flight_bios(struct mapped_device *md)
+static bool dm_in_flight_bios(struct mapped_device *md)
 {
 	int cpu;
-	struct block_device *part = dm_disk(md)->part0;
-	long sum = 0;
+	unsigned long sum = 0;
 
-	for_each_possible_cpu(cpu) {
-		sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
-		sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
-	}
+	for_each_possible_cpu(cpu)
+		sum += *per_cpu_ptr(md->pending_io, cpu);
 
 	return sum != 0;
 }
@@ -2100,7 +2340,7 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
 	while (true) {
 		prepare_to_wait(&md->wait, &wait, task_state);
 
-		if (!md_in_flight_bios(md))
+		if (!dm_in_flight_bios(md))
 			break;
 
 		if (signal_pending_state(task_state, current)) {
@@ -2112,6 +2352,8 @@ static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int ta
 	}
 	finish_wait(&md->wait, &wait);
 
+	smp_rmb();
+
 	return r;
 }
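The new smp_rmb() above pairs with the smp_wmb() that dm_io_complete() issues between freeing the io and decrementing md->pending_io: once the waiter observes the count at zero, all completion-side writes are guaranteed visible. A userspace C11 model of the same fence pairing; the relaxed counter plus explicit fences mirrors smp_wmb()/smp_rmb() semantics, not the full suspend path (build with -lpthread).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int io_state;			/* plain data, no atomics */
static atomic_ulong pending = 1;

static void *completion_path(void *arg)
{
	io_state = 42;					/* tear-down writes */
	atomic_thread_fence(memory_order_release);	/* models smp_wmb() */
	atomic_fetch_sub_explicit(&pending, 1, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completion_path, NULL);
	while (atomic_load_explicit(&pending, memory_order_relaxed))
		;					/* models the md->wait loop */
	atomic_thread_fence(memory_order_acquire);	/* models smp_rmb() */
	printf("io_state=%d\n", io_state);		/* guaranteed to print 42 */
	pthread_join(t, NULL);
	return 0;
}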
@@ -2283,11 +2525,11 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
 	/*
 	 * Here we must make sure that no processes are submitting requests
 	 * to target drivers i.e. no one may be executing
-	 * __split_and_process_bio from dm_submit_bio.
+	 * dm_split_and_process_bio from dm_submit_bio.
 	 *
-	 * To get all processes out of __split_and_process_bio in dm_submit_bio,
+	 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
 	 * we take the write lock. To prevent any process from reentering
-	 * __split_and_process_bio from dm_submit_bio and quiesce the thread
+	 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
 	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
 	 * flush_workqueue(md->wq).
 	 */
@@ -2895,6 +3137,7 @@ static const struct pr_ops dm_pr_ops = {
 
 static const struct block_device_operations dm_blk_dops = {
 	.submit_bio = dm_submit_bio,
+	.poll_bio = dm_poll_bio,
 	.open = dm_blk_open,
 	.release = dm_blk_close,
 	.ioctl = dm_blk_ioctl,
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1457,6 +1457,8 @@ enum blk_unique_id {
 
 struct block_device_operations {
 	void (*submit_bio)(struct bio *bio);
+	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
+			unsigned int flags);
 	int (*open) (struct block_device *, fmode_t);
 	void (*release) (struct gendisk *, fmode_t);
 	int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
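For drivers, opting into the new hook is just a second function pointer next to ->submit_bio, exactly as dm_blk_dops does above. A minimal sketch with placeholder bodies; my_submit_bio/my_poll_bio are invented, and a real driver would track its in-flight bios and reap completions inside the poll callback.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/module.h>

static void my_submit_bio(struct bio *bio)
{
	bio_endio(bio);			/* placeholder: complete immediately */
}

static int my_poll_bio(struct bio *bio, struct io_comp_batch *iob,
		       unsigned int flags)
{
	return 1;			/* placeholder: report "done" */
}

static const struct block_device_operations my_blk_dops = {
	.owner		= THIS_MODULE,
	.submit_bio	= my_submit_bio,
	.poll_bio	= my_poll_bio,	/* invoked only for REQ_POLLED bios */
};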
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -358,10 +358,16 @@ struct dm_target {
 	bool limit_swap_bios:1;
 
 	/*
-	 * Set if this target implements a a zoned device and needs emulation of
+	 * Set if this target implements a zoned device and needs emulation of
 	 * zone append operations using regular writes.
 	 */
 	bool emulate_zone_append:1;
+
+	/*
+	 * Set if the target will submit IO using dm_submit_bio_remap()
+	 * after returning DM_MAPIO_SUBMITTED from its map function.
+	 */
+	bool accounts_remapped_io:1;
 };
 
 void *dm_per_bio_data(struct bio *bio, size_t data_size);
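A target advertises the new behaviour from its constructor, mirroring how dm-crypt, dm-thin and dm-cache are converted in this series; example_ctr itself is hypothetical.

#include <linux/device-mapper.h>

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->num_flush_bios = 1;
	/* Pairs with returning DM_MAPIO_SUBMITTED and then calling
	 * dm_submit_bio_remap() from the target's own context. */
	ti->accounts_remapped_io = true;
	return 0;
}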
@@ -465,6 +471,7 @@ int dm_suspended(struct dm_target *ti);
 int dm_post_suspending(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
 
 #ifdef CONFIG_BLK_DEV_ZONED
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -286,9 +286,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	45
+#define DM_VERSION_MINOR	46
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2021-03-22)"
+#define DM_VERSION_EXTRA	"-ioctl (2022-02-22)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */