writeback: remove bdi_start_writeback()

bdi_start_writeback() is a thin wrapper on top of
__wb_start_writeback() and is used only by laptop_mode_timer_fn().
This patch removes bdi_start_writeback(), renames
__wb_start_writeback() to wb_start_writeback() and makes
laptop_mode_timer_fn() use it instead.

This doesn't cause any functional difference and will ease making
laptop_mode_timer_fn() cgroup writeback aware.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
Author:    Tejun Heo, 2015-05-22 17:13:51 -04:00
Committer: Jens Axboe
Commit:    c00ddad39f (parent ebe41ab0c7)
3 changed files with 29 additions and 47 deletions
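
For orientation, a minimal sketch of what the conversion looks like at a call site. The example_kick_writeback() helper is hypothetical and for illustration only; the wb_start_writeback() signature, bdi_has_dirty_io() and WB_REASON_LAPTOP_TIMER are taken from the diff below.

/*
 * Hypothetical caller, illustration only: kick off WB_SYNC_NONE
 * writeback of up to @nr_pages on @bdi without waiting for completion.
 */
static void example_kick_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	if (!bdi_has_dirty_io(bdi))
		return;

	/*
	 * Before this patch the wrapper hid both extra arguments:
	 *
	 *	bdi_start_writeback(bdi, nr_pages, WB_REASON_LAPTOP_TIMER);
	 *
	 * which always meant bdi->wb and range_cyclic == true.
	 */

	/*
	 * After this patch the caller names the bdi_writeback and the
	 * range_cyclic mode explicitly, which is what eases making
	 * callers such as laptop_mode_timer_fn() cgroup writeback aware
	 * later.
	 */
	wb_start_writeback(&bdi->wb, nr_pages, true, WB_REASON_LAPTOP_TIMER);
}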

fs/fs-writeback.c

@@ -184,33 +184,6 @@ static void wb_queue_work(struct bdi_writeback *wb,
 	spin_unlock_bh(&wb->work_lock);
 }
 
-static void __wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
-				 bool range_cyclic, enum wb_reason reason)
-{
-	struct wb_writeback_work *work;
-
-	if (!wb_has_dirty_io(wb))
-		return;
-
-	/*
-	 * This is WB_SYNC_NONE writeback, so if allocation fails just
-	 * wakeup the thread for old dirty data writeback
-	 */
-	work = kzalloc(sizeof(*work), GFP_ATOMIC);
-	if (!work) {
-		trace_writeback_nowork(wb->bdi);
-		wb_wakeup(wb);
-		return;
-	}
-
-	work->sync_mode = WB_SYNC_NONE;
-	work->nr_pages = nr_pages;
-	work->range_cyclic = range_cyclic;
-	work->reason = reason;
-
-	wb_queue_work(wb, work);
-}
-
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 /**
@@ -240,22 +213,31 @@ EXPORT_SYMBOL_GPL(inode_congested);
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
-/**
- * bdi_start_writeback - start writeback
- * @bdi: the backing device to write from
- * @nr_pages: the number of pages to write
- * @reason: reason why some writeback work was initiated
- *
- * Description:
- *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
- *   started when this function returns, we make no guarantees on
- *   completion. Caller need not hold sb s_umount semaphore.
- *
- */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			enum wb_reason reason)
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason)
 {
-	__wb_start_writeback(&bdi->wb, nr_pages, true, reason);
+	struct wb_writeback_work *work;
+
+	if (!wb_has_dirty_io(wb))
+		return;
+
+	/*
+	 * This is WB_SYNC_NONE writeback, so if allocation fails just
+	 * wakeup the thread for old dirty data writeback
+	 */
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		trace_writeback_nowork(wb->bdi);
+		wb_wakeup(wb);
+		return;
+	}
+
+	work->sync_mode = WB_SYNC_NONE;
+	work->nr_pages = nr_pages;
+	work->range_cyclic = range_cyclic;
+	work->reason = reason;
+
+	wb_queue_work(wb, work);
 }
 
 /**
@@ -1219,7 +1201,7 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
-		__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
+		wb_start_writeback(&bdi->wb, nr_pages, false, reason);
 	rcu_read_unlock();
 }

include/linux/backing-dev.h

@@ -25,8 +25,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-			enum wb_reason reason);
+void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
+			bool range_cyclic, enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
 void wb_workfn(struct work_struct *work);
 void wb_wakeup_delayed(struct bdi_writeback *wb);

mm/page-writeback.c

@@ -1729,8 +1729,8 @@ void laptop_mode_timer_fn(unsigned long data)
 	 * threshold
 	 */
 	if (bdi_has_dirty_io(&q->backing_dev_info))
-		bdi_start_writeback(&q->backing_dev_info, nr_pages,
-					WB_REASON_LAPTOP_TIMER);
+		wb_start_writeback(&q->backing_dev_info.wb, nr_pages, true,
+				   WB_REASON_LAPTOP_TIMER);
 }
 
 /*