Mirror of https://github.com/torvalds/linux, synced 2024-07-23 03:29:48 +00:00.
io-wq: kill now unused io_wq_cancel_all()
io_uring no longer issues full cancelations on the io-wq, so remove any remnants of this code and the IO_WQ_BIT_CANCEL flag. Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
00c18640c2
commit
446bc1c207
Changed files: fs/io-wq.c and fs/io-wq.h — 30 lines changed in total (additions and deletions combined).
|
@@ -36,8 +36,7 @@ enum {

 enum {
 	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
-	IO_WQ_BIT_CANCEL	= 1,	/* cancel work on list */
-	IO_WQ_BIT_ERROR		= 2,	/* error on setup */
+	IO_WQ_BIT_ERROR		= 1,	/* error on setup */
 };

 enum {
@@ -561,12 +560,6 @@ static void io_worker_handle_work(struct io_worker *worker)

 			next_hashed = wq_next_work(work);
 			io_impersonate_work(worker, work);
-			/*
-			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
-			 * work, the worker function will do the right thing.
-			 */
-			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
-				work->flags |= IO_WQ_WORK_CANCEL;

 			old_work = work;
 			linked = wq->do_work(work);
@@ -732,12 +725,6 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)

 	return acct->nr_workers < acct->max_workers;
 }

-static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
-{
-	send_sig(SIGINT, worker->task, 1);
-	return false;
-}
-
 /*
  * Iterate the passed in list and call the specific function for each
  * worker that isn't exiting
@@ -938,21 +925,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)

 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }

-void io_wq_cancel_all(struct io_wq *wq)
-{
-	int node;
-
-	set_bit(IO_WQ_BIT_CANCEL, &wq->state);
-
-	rcu_read_lock();
-	for_each_node(node) {
-		struct io_wqe *wqe = wq->wqes[node];
-
-		io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
-	}
-	rcu_read_unlock();
-}
-
 struct io_cb_cancel_data {
 	work_cancel_fn *fn;
 	void *data;
@@ -129,8 +129,6 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work)

 	return work->flags & IO_WQ_WORK_HASHED;
 }

-void io_wq_cancel_all(struct io_wq *wq);
-
 typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
Loading…
Reference in a new issue