block: check that there is a plug in blk_flush_plug

Rename blk_flush_plug to __blk_flush_plug and add an inline wrapper
that performs the NULL check, instead of open coding that check at
every call site.
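
In effect this is the usual "double-underscore worker plus checking
wrapper" split: the renamed __blk_flush_plug() assumes a non-NULL plug,
while the new static inline blk_flush_plug() does the NULL check once in
the header. The following is a minimal, self-contained userspace sketch
of that pattern, for illustration only; the demo_* names and the struct
fields are made up and are not part of this patch:

#include <stdio.h>

/* Stand-in for struct blk_plug; the field is made up for the demo. */
struct plug {
	int pending;
};

/* Worker: assumes a valid plug, mirroring __blk_flush_plug(). */
static void __demo_flush_plug(struct plug *plug)
{
	printf("flushing %d pending requests\n", plug->pending);
	plug->pending = 0;
}

/* Checking wrapper: callers may pass NULL, mirroring blk_flush_plug(). */
static inline void demo_flush_plug(struct plug *plug)
{
	if (plug)
		__demo_flush_plug(plug);
}

int main(void)
{
	struct plug p = { .pending = 3 };

	demo_flush_plug(&p);	/* flushes the queued requests */
	demo_flush_plug(NULL);	/* safe no-op; no open coded check at the call site */
	return 0;
}

Because the wrapper is a header inline, callers that pass a possibly-NULL
current->plug compile to the same check they used to open code, while the
out-of-line worker keeps the actual flushing logic in one place.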

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220127070549.1377856-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit aa8dcccaf3 (parent: b1f866b013)
Author: Christoph Hellwig <hch@lst.de>, 2022-01-27 08:05:49 +01:00
Committed by: Jens Axboe <axboe@kernel.dk>
4 changed files, 13 insertions(+), 14 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -991,8 +991,7 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
 
-	if (current->plug)
-		blk_flush_plug(current->plug, false);
+	blk_flush_plug(current->plug, false);
 
 	if (blk_queue_enter(q, BLK_MQ_REQ_NOWAIT))
 		return 0;
@@ -1274,7 +1273,7 @@ struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
 }
 EXPORT_SYMBOL(blk_check_plugged);
 
-void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 {
 	if (!list_empty(&plug->cb_list))
 		flush_plug_callbacks(plug, from_schedule);
@@ -1303,7 +1302,7 @@ void blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 void blk_finish_plug(struct blk_plug *plug)
 {
 	if (plug == current->plug) {
-		blk_flush_plug(plug, false);
+		__blk_flush_plug(plug, false);
 		current->plug = NULL;
 	}
 }

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1903,8 +1903,7 @@ static long writeback_sb_inodes(struct super_block *sb,
 			 * unplug, so get our IOs out the door before we
 			 * give up the CPU.
 			 */
-			if (current->plug)
-				blk_flush_plug(current->plug, false);
+			blk_flush_plug(current->plug, false);
 			cond_resched();
 		}
 
@@ -2301,8 +2300,7 @@ void wakeup_flusher_threads(enum wb_reason reason)
 	/*
 	 * If we are expecting writeback progress we must submit plugged IO.
 	 */
-	if (current->plug)
-		blk_flush_plug(current->plug, true);
+	blk_flush_plug(current->plug, true);
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1053,7 +1053,12 @@ extern void blk_start_plug(struct blk_plug *);
 extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
 extern void blk_finish_plug(struct blk_plug *);
 
-void blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
+{
+	if (plug)
+		__blk_flush_plug(plug, async);
+}
 
 int blkdev_issue_flush(struct block_device *bdev);
 long nr_blockdev_pages(void);

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6344,8 +6344,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	 * If we are going to sleep and we have plugged IO queued,
 	 * make sure to submit it to avoid deadlocks.
 	 */
-	if (tsk->plug)
-		blk_flush_plug(tsk->plug, true);
+	blk_flush_plug(tsk->plug, true);
 }
 
 static void sched_update_worker(struct task_struct *tsk)
@@ -8371,9 +8370,7 @@ int io_schedule_prepare(void)
 	int old_iowait = current->in_iowait;
 
 	current->in_iowait = 1;
-	if (current->plug)
-		blk_flush_plug(current->plug, true);
-
+	blk_flush_plug(current->plug, true);
 	return old_iowait;
 }
 