From 3a150601e19254fda84d2da131f6ba1a2bdc113e Mon Sep 17 00:00:00 2001
From: Alexander Motin
Date: Tue, 8 Aug 2017 19:36:34 +0000
Subject: [PATCH] Fix a few issues in the LinuxKPI workqueue.

The LinuxKPI workqueue wrappers reported "successful" cancellation for
works that had already completed normally.  This change brings the
reported status into sync with whether a cancellation actually took
place.  This is required for drm-next operation.

Reviewed by: hselasky (earlier version)
Sponsored by: iXsystems, Inc.
Differential Revision: https://reviews.freebsd.org/D11904
---
 sys/compat/linuxkpi/common/src/linux_work.c | 58 ++++++++++++++-------
 1 file changed, 39 insertions(+), 19 deletions(-)

diff --git a/sys/compat/linuxkpi/common/src/linux_work.c b/sys/compat/linuxkpi/common/src/linux_work.c
index e0aa4e6ff599..36c2ebad4f9b 100644
--- a/sys/compat/linuxkpi/common/src/linux_work.c
+++ b/sys/compat/linuxkpi/common/src/linux_work.c
@@ -214,7 +214,7 @@ linux_work_fn(void *context, int pending)
 		[WORK_ST_TIMER] = WORK_ST_EXEC,	/* delayed work w/o timeout */
 		[WORK_ST_TASK] = WORK_ST_EXEC,	/* call callback */
 		[WORK_ST_EXEC] = WORK_ST_IDLE,	/* complete callback */
-		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* complete cancel */
+		[WORK_ST_CANCEL] = WORK_ST_EXEC,	/* failed to cancel */
 	};
 	struct work_struct *work;
 	struct workqueue_struct *wq;
@@ -236,6 +236,7 @@ linux_work_fn(void *context, int pending)
 	switch (linux_update_state(&work->state, states)) {
 	case WORK_ST_TIMER:
 	case WORK_ST_TASK:
+	case WORK_ST_CANCEL:
 		WQ_EXEC_UNLOCK(wq);
 
 		/* call work function */
@@ -266,13 +267,14 @@ linux_delayed_work_timer_fn(void *arg)
 		[WORK_ST_IDLE] = WORK_ST_IDLE,	/* NOP */
 		[WORK_ST_TIMER] = WORK_ST_TASK,	/* start queueing task */
 		[WORK_ST_TASK] = WORK_ST_TASK,	/* NOP */
-		[WORK_ST_EXEC] = WORK_ST_TASK,	/* queue task another time */
-		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* complete cancel */
+		[WORK_ST_EXEC] = WORK_ST_EXEC,	/* NOP */
+		[WORK_ST_CANCEL] = WORK_ST_TASK,	/* failed to cancel */
 	};
 	struct delayed_work *dwork = arg;
 
 	switch (linux_update_state(&dwork->work.state, states)) {
 	case WORK_ST_TIMER:
+	case WORK_ST_CANCEL:
 		linux_delayed_work_enqueue(dwork);
 		break;
 	default:
@@ -290,10 +292,10 @@ linux_cancel_work_sync(struct work_struct *work)
 {
 	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
 		[WORK_ST_IDLE] = WORK_ST_IDLE,	/* NOP */
-		[WORK_ST_TIMER] = WORK_ST_IDLE,	/* idle */
-		[WORK_ST_TASK] = WORK_ST_IDLE,	/* idle */
-		[WORK_ST_EXEC] = WORK_ST_IDLE,	/* idle */
-		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* idle */
+		[WORK_ST_TIMER] = WORK_ST_TIMER,	/* can't happen */
+		[WORK_ST_TASK] = WORK_ST_IDLE,	/* cancel and drain */
+		[WORK_ST_EXEC] = WORK_ST_IDLE,	/* too late, drain */
+		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
 	};
 	struct taskqueue *tq;
 
@@ -302,6 +304,12 @@ linux_cancel_work_sync(struct work_struct *work)
 
 	switch (linux_update_state(&work->state, states)) {
 	case WORK_ST_IDLE:
+	case WORK_ST_TIMER:
+		return (0);
+	case WORK_ST_EXEC:
+		tq = work->work_queue->taskqueue;
+		if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
+			taskqueue_drain(tq, &work->work_task);
 		return (0);
 	default:
 		tq = work->work_queue->taskqueue;
@@ -343,23 +351,29 @@ linux_cancel_delayed_work(struct delayed_work *dwork)
 {
 	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
 		[WORK_ST_IDLE] = WORK_ST_IDLE,	/* NOP */
-		[WORK_ST_TIMER] = WORK_ST_CANCEL,	/* cancel */
-		[WORK_ST_TASK] = WORK_ST_CANCEL,	/* cancel */
-		[WORK_ST_EXEC] = WORK_ST_CANCEL,	/* cancel */
-		[WORK_ST_CANCEL] = WORK_ST_CANCEL,	/* cancel */
+		[WORK_ST_TIMER] = WORK_ST_CANCEL,	/* try to cancel */
+		[WORK_ST_TASK] = WORK_ST_CANCEL,	/* try to cancel */
+		[WORK_ST_EXEC] = WORK_ST_EXEC,	/* NOP */
+		[WORK_ST_CANCEL] = WORK_ST_CANCEL,	/* NOP */
 	};
 	struct taskqueue *tq;
 
 	switch (linux_update_state(&dwork->work.state, states)) {
 	case WORK_ST_TIMER:
-		if (linux_cancel_timer(dwork, 0))
+	case WORK_ST_CANCEL:
+		if (linux_cancel_timer(dwork, 0)) {
+			atomic_cmpxchg(&dwork->work.state,
+			    WORK_ST_CANCEL, WORK_ST_IDLE);
 			return (1);
+		}
 		/* FALLTHROUGH */
 	case WORK_ST_TASK:
-	case WORK_ST_EXEC:
 		tq = dwork->work.work_queue->taskqueue;
-		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0)
+		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0) {
+			atomic_cmpxchg(&dwork->work.state,
+			    WORK_ST_CANCEL, WORK_ST_IDLE);
 			return (1);
+		}
 		/* FALLTHROUGH */
 	default:
 		return (0);
@@ -376,10 +390,10 @@ linux_cancel_delayed_work_sync(struct delayed_work *dwork)
 {
 	static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
 		[WORK_ST_IDLE] = WORK_ST_IDLE,	/* NOP */
-		[WORK_ST_TIMER] = WORK_ST_IDLE,	/* idle */
-		[WORK_ST_TASK] = WORK_ST_IDLE,	/* idle */
-		[WORK_ST_EXEC] = WORK_ST_IDLE,	/* idle */
-		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* idle */
+		[WORK_ST_TIMER] = WORK_ST_IDLE,	/* cancel and drain */
+		[WORK_ST_TASK] = WORK_ST_IDLE,	/* cancel and drain */
+		[WORK_ST_EXEC] = WORK_ST_IDLE,	/* too late, drain */
+		[WORK_ST_CANCEL] = WORK_ST_IDLE,	/* cancel and drain */
 	};
 	struct taskqueue *tq;
 
@@ -389,7 +403,13 @@ linux_cancel_delayed_work_sync(struct delayed_work *dwork)
 	switch (linux_update_state(&dwork->work.state, states)) {
 	case WORK_ST_IDLE:
 		return (0);
+	case WORK_ST_EXEC:
+		tq = dwork->work.work_queue->taskqueue;
+		if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) != 0)
+			taskqueue_drain(tq, &dwork->work.work_task);
+		return (0);
 	case WORK_ST_TIMER:
+	case WORK_ST_CANCEL:
 		if (linux_cancel_timer(dwork, 1)) {
 			/*
 			 * Make sure taskqueue is also drained before
@@ -468,6 +488,7 @@ linux_work_pending(struct work_struct *work)
 	switch (atomic_read(&work->state)) {
 	case WORK_ST_TIMER:
 	case WORK_ST_TASK:
+	case WORK_ST_CANCEL:
 		return (1);
 	default:
 		return (0);
@@ -486,7 +507,6 @@ linux_work_busy(struct work_struct *work)
 	case WORK_ST_IDLE:
 		return (0);
 	case WORK_ST_EXEC:
-	case WORK_ST_CANCEL:
 		tq = work->work_queue->taskqueue;
 		return (taskqueue_poll_is_busy(tq, &work->work_task));
 	default:
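
Reviewer note (not part of the patch): as the switch statements above suggest, linux_update_state() atomically replaces the per-work state with the table entry for the observed state and returns the state it observed, so the cancel paths can claim success only when the work was still pending at that instant. Below is a minimal standalone sketch of that table-driven update, written with C11 atomics instead of the LinuxKPI atomic_t helpers; the update_state() name and the C11 calls are illustrative assumptions, not the code in linux_work.c.

#include <stdatomic.h>
#include <stdint.h>

enum {
	WORK_ST_IDLE,		/* idle, nothing pending */
	WORK_ST_TIMER,		/* delayed work: timer armed */
	WORK_ST_TASK,		/* task enqueued on the taskqueue */
	WORK_ST_EXEC,		/* work function running (or finished) */
	WORK_ST_CANCEL,		/* cancellation requested */
	WORK_ST_MAX
};

/*
 * Atomically replace the current state with states[current] and return
 * the state that was observed, so the caller can branch on what the
 * work was actually doing at the moment of the transition.
 */
static uint8_t
update_state(_Atomic uint8_t *statep, const uint8_t states[WORK_ST_MAX])
{
	uint8_t old = atomic_load(statep);

	while (!atomic_compare_exchange_weak(statep, &old, states[old]))
		;	/* a failed CAS reloads 'old'; retry with its table entry */
	return (old);
}

With that shape, linux_cancel_delayed_work() returns 1 only when it observed WORK_ST_TIMER or WORK_ST_CANCEL and actually stopped the callout or task, and returns 0 when it observed WORK_ST_EXEC, i.e. the callback already ran; that is the status/reality sync the commit message says drm-next depends on.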