sleepqueue: Remove kernel stack swapping support, part 10

- Remove kick_proc0().
- Make the return type of sleepq_broadcast(), sleepq_signal(), etc.,
  void.
- Fix up callers.

Tested by:	pho
Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D46128
This commit is contained in:
Mark Johnston 2024-07-29 01:42:19 +00:00
parent d4c4ca856b
commit 01518f5eed
11 changed files with 57 additions and 140 deletions

View file

@@ -2068,20 +2068,16 @@ SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
 void
 linux_complete_common(struct completion *c, int all)
 {
-	int wakeup_swapper;
 	sleepq_lock(c);
 	if (all) {
 		c->done = UINT_MAX;
-		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+		sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
 	} else {
 		if (c->done != UINT_MAX)
 			c->done++;
-		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+		sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
 	}
 	sleepq_release(c);
-	if (wakeup_swapper)
-		kick_proc0();
 }
/*

View file

@@ -98,18 +98,16 @@ linux_msleep_interruptible(unsigned int ms)
 static int
 wake_up_task(struct task_struct *task, unsigned int state)
 {
-	int ret, wakeup_swapper;
+	int ret;
-	ret = wakeup_swapper = 0;
+	ret = 0;
 	sleepq_lock(task);
 	if ((atomic_read(&task->state) & state) != 0) {
 		set_task_state(task, TASK_WAKING);
-		wakeup_swapper = sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
+		sleepq_signal(task, SLEEPQ_SLEEP, 0, 0);
 		ret = 1;
 	}
 	sleepq_release(task);
-	if (wakeup_swapper)
-		kick_proc0();
 	return (ret);
 }
@@ -330,13 +328,9 @@ linux_schedule_timeout(int timeout)
 static void
 wake_up_sleepers(void *wchan)
 {
-	int wakeup_swapper;
 	sleepq_lock(wchan);
-	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
+	sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
 	sleepq_release(wchan);
-	if (wakeup_swapper)
-		kick_proc0();
 }
#define bit_to_wchan(word, bit) ((void *)(((uintptr_t)(word) << 6) | (bit)))

View file

@@ -28,17 +28,13 @@ static struct workqueue_struct *device_reset_wq;
 void
 linux_complete_common(struct completion *c, int all)
 {
-	int wakeup_swapper;
 	sleepq_lock(c);
 	c->done++;
 	if (all)
-		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+		sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
 	else
-		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+		sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
 	sleepq_release(c);
-	if (wakeup_swapper)
-		kick_proc0();
 }
/* reset dev data */

View file

@@ -427,16 +427,13 @@ _cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock,
 }
 /*
- * Signal a condition variable, wakes up one waiting thread. Will also wakeup
- * the swapper if the process is not in memory, so that it can bring the
- * sleeping process in. Note that this may also result in additional threads
- * being made runnable. Should be called with the same mutex as was passed to
- * cv_wait held.
+ * Signal a condition variable, wakes up one waiting thread. Note that this may
+ * also result in additional threads being made runnable. Should be called with
+ * the same mutex as was passed to cv_wait held.
  */
 void
 cv_signal(struct cv *cvp)
 {
 	if (cvp->cv_waiters == 0)
 		return;
 	sleepq_lock(cvp);
@@ -450,8 +447,7 @@ cv_signal(struct cv *cvp)
 	} else {
 		if (cvp->cv_waiters < CV_WAITERS_BOUND)
 			cvp->cv_waiters--;
-		if (sleepq_signal(cvp, SLEEPQ_CONDVAR | SLEEPQ_DROP, 0, 0))
-			kick_proc0();
+		sleepq_signal(cvp, SLEEPQ_CONDVAR | SLEEPQ_DROP, 0, 0);
 	}
 }
@@ -462,23 +458,18 @@ cv_signal(struct cv *cvp)
 void
 cv_broadcastpri(struct cv *cvp, int pri)
 {
-	int wakeup_swapper;
 	if (cvp->cv_waiters == 0)
 		return;
 	/*
 	 * XXX sleepq_broadcast pri argument changed from -1 meaning
 	 * no pri to 0 meaning no pri.
 	 */
-	wakeup_swapper = 0;
 	if (pri == -1)
 		pri = 0;
 	sleepq_lock(cvp);
 	if (cvp->cv_waiters > 0) {
 		cvp->cv_waiters = 0;
-		wakeup_swapper = sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
+		sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
 	}
 	sleepq_release(cvp);
-	if (wakeup_swapper)
-		kick_proc0();
 }

View file

@@ -186,7 +186,7 @@ static __always_inline bool lockmgr_sunlock_try(struct lock *lk,
     uintptr_t *xp);
 static void
-lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
+lockmgr_exit(u_int flags, struct lock_object *ilk)
 {
 	struct lock_class *class;
@@ -194,9 +194,6 @@ lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
 	class = LOCK_CLASS(ilk);
 	class->lc_unlock(ilk);
 	}
-	if (__predict_false(wakeup_swapper))
-		kick_proc0();
 }
static void
@@ -310,14 +307,13 @@ sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
 	return (error);
 }
-static __inline int
+static __inline void
 wakeupshlk(struct lock *lk, const char *file, int line)
 {
 	uintptr_t v, x, orig_x;
 	u_int realexslp;
-	int queue, wakeup_swapper;
+	int queue;
-	wakeup_swapper = 0;
 	for (;;) {
 		x = lockmgr_read_value(lk);
 		if (lockmgr_sunlock_try(lk, &x))
@@ -361,9 +357,8 @@ wakeupshlk(struct lock *lk, const char *file, int line)
 			LOCK_LOG2(lk,
 			    "%s: %p waking up threads on the exclusive queue",
 			    __func__, lk);
-			wakeup_swapper =
-			    sleepq_broadcast(&lk->lock_object,
-			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
+			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
+			    SQ_EXCLUSIVE_QUEUE);
 			queue = SQ_SHARED_QUEUE;
 		}
 	} else {
@@ -390,14 +385,12 @@ wakeupshlk(struct lock *lk, const char *file, int line)
 		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
 		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 		    "exclusive");
-		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
-		    0, queue);
+		sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
 		sleepq_release(&lk->lock_object);
 		break;
 	}
 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
-	return (wakeup_swapper);
 }
static void
@@ -730,7 +723,7 @@ lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
 	}
 out:
-	lockmgr_exit(flags, ilk, 0);
+	lockmgr_exit(flags, ilk);
 	return (error);
 }
@@ -968,7 +961,7 @@ lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
 	}
 out:
-	lockmgr_exit(flags, ilk, 0);
+	lockmgr_exit(flags, ilk);
 	return (error);
 }
@@ -1028,7 +1021,7 @@ lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
 	error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
 	flags &= ~LK_INTERLOCK;
 out:
-	lockmgr_exit(flags, ilk, 0);
+	lockmgr_exit(flags, ilk);
 	return (error);
 }
@@ -1100,17 +1093,10 @@ lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
 static __noinline int
 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
     const char *file, int line)
 {
-	int wakeup_swapper = 0;
-	if (SCHEDULER_STOPPED())
-		goto out;
-	wakeup_swapper = wakeupshlk(lk, file, line);
-out:
-	lockmgr_exit(flags, ilk, wakeup_swapper);
+	if (!SCHEDULER_STOPPED())
+		wakeupshlk(lk, file, line);
+	lockmgr_exit(flags, ilk);
 	return (0);
 }
@@ -1119,7 +1105,6 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
     const char *file, int line)
 {
 	uintptr_t tid, v;
-	int wakeup_swapper = 0;
 	u_int realexslp;
 	int queue;
@@ -1188,8 +1173,8 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
 			LOCK_LOG2(lk,
 			    "%s: %p waking up threads on the exclusive queue",
 			    __func__, lk);
-			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
-			    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
+			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
+			    SQ_EXCLUSIVE_QUEUE);
 			queue = SQ_SHARED_QUEUE;
 		}
 	} else {
@@ -1207,11 +1192,11 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
 	    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
 	    "exclusive");
 	atomic_store_rel_ptr(&lk->lk_lock, v);
-	wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
+	sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
 	sleepq_release(&lk->lock_object);
 out:
-	lockmgr_exit(flags, ilk, wakeup_swapper);
+	lockmgr_exit(flags, ilk);
 	return (0);
 }
@@ -1309,7 +1294,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 	const char *iwmesg;
 	uintptr_t tid, v, x;
 	u_int op, realexslp;
-	int error, ipri, itimo, queue, wakeup_swapper;
+	int error, ipri, itimo, queue;
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
 	int contested = 0;
@@ -1361,7 +1346,6 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 		}
 	}
-	wakeup_swapper = 0;
 	switch (op) {
 	case LK_SHARED:
 		return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
@@ -1519,8 +1503,7 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 				LOCK_LOG2(lk,
 				    "%s: %p waking up threads on the exclusive queue",
 				    __func__, lk);
-				wakeup_swapper =
-				    sleepq_broadcast(
+				sleepq_broadcast(
 				    &lk->lock_object,
 				    SLEEPQ_LK, 0,
 				    SQ_EXCLUSIVE_QUEUE);
@@ -1536,8 +1519,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 			    "%s: %p waking up all threads on the %s queue",
 			    __func__, lk, queue == SQ_SHARED_QUEUE ?
 			    "shared" : "exclusive");
-			wakeup_swapper |= sleepq_broadcast(
-			    &lk->lock_object, SLEEPQ_LK, 0, queue);
+			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0,
+			    queue);
 			/*
 			 * If shared waiters have been woken up we need
@@ -1604,8 +1587,6 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 	if (flags & LK_INTERLOCK)
 		class->lc_unlock(ilk);
-	if (wakeup_swapper)
-		kick_proc0();
 	return (error);
 }

View file

@@ -474,7 +474,6 @@ void
 sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 {
 	uintptr_t x;
-	int wakeup_swapper;
 	if (SCHEDULER_STOPPED())
 		return;
@@ -516,18 +515,14 @@ sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
 	 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
 	 * shared lock. If there are any shared waiters, wake them up.
 	 */
-	wakeup_swapper = 0;
 	x = sx->sx_lock;
 	atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
 	    (x & SX_LOCK_EXCLUSIVE_WAITERS));
 	if (x & SX_LOCK_SHARED_WAITERS)
-		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
-		    0, SQ_SHARED_QUEUE);
+		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
+		    SQ_SHARED_QUEUE);
 	sleepq_release(&sx->lock_object);
-	if (wakeup_swapper)
-		kick_proc0();
 out:
 	curthread->td_sx_slocks++;
 	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
@@ -920,7 +915,7 @@ void
 _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 {
 	uintptr_t tid, setx;
-	int queue, wakeup_swapper;
+	int queue;
 	if (SCHEDULER_STOPPED())
 		return;
@@ -977,11 +972,8 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
 	    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
 	    "exclusive");
-	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
-	    queue);
+	sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
 	sleepq_release(&sx->lock_object);
-	if (wakeup_swapper)
-		kick_proc0();
 }
static __always_inline bool
@@ -1333,7 +1325,6 @@ static void __noinline
 _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
     LOCK_FILE_LINE_ARG_DEF)
 {
-	int wakeup_swapper = 0;
 	uintptr_t setx, queue;
 	if (SCHEDULER_STOPPED())
@@ -1366,14 +1357,11 @@ _sx_sunlock_hard(struct sx *sx, struct thread *td, uintptr_t x
 		if (LOCK_LOG_TEST(&sx->lock_object, 0))
 			CTR2(KTR_LOCK, "%s: %p waking up all thread on"
 			    "exclusive queue", __func__, sx);
-		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
-		    0, queue);
+		sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
 		td->td_sx_slocks--;
 		break;
 	}
 	sleepq_release(&sx->lock_object);
-	if (wakeup_swapper)
-		kick_proc0();
 out_lockstat:
 	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
 }

View file

@@ -344,16 +344,9 @@ pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
 void
 wakeup(const void *ident)
 {
-	int wakeup_swapper;
 	sleepq_lock(ident);
-	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
+	sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
 	sleepq_release(ident);
-	if (wakeup_swapper) {
-		KASSERT(ident != &proc0,
-		    ("wakeup and wakeup_swapper and proc0"));
-		kick_proc0();
-	}
 }
 /*
@@ -364,24 +357,15 @@ wakeup(const void *ident)
 void
 wakeup_one(const void *ident)
 {
-	int wakeup_swapper;
 	sleepq_lock(ident);
-	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_DROP, 0, 0);
-	if (wakeup_swapper)
-		kick_proc0();
+	sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_DROP, 0, 0);
 }
 void
 wakeup_any(const void *ident)
 {
-	int wakeup_swapper;
 	sleepq_lock(ident);
-	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR |
-	    SLEEPQ_DROP, 0, 0);
-	if (wakeup_swapper)
-		kick_proc0();
+	sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR | SLEEPQ_DROP, 0, 0);
 }
/*

View file

@@ -920,7 +920,7 @@ sleepq_init(void *mem, int size, int flags)
 /*
  * Find thread sleeping on a wait channel and resume it.
  */
-int
+void
 sleepq_signal(const void *wchan, int flags, int pri, int queue)
 {
 	struct sleepqueue_chain *sc;
@@ -935,7 +935,7 @@ sleepq_signal(const void *wchan, int flags, int pri, int queue)
 	if (sq == NULL) {
 		if (flags & SLEEPQ_DROP)
 			sleepq_release(wchan);
-		return (0);
+		return;
 	}
 	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
 	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
@@ -971,7 +971,6 @@ sleepq_signal(const void *wchan, int flags, int pri, int queue)
 	MPASS(besttd != NULL);
 	sleepq_resume_thread(sq, besttd, pri,
 	    (flags & SLEEPQ_DROP) ? 0 : SRQ_HOLD);
-	return (0);
 }
 static bool
@@ -984,7 +983,7 @@ match_any(struct thread *td __unused)
 /*
  * Resume all threads sleeping on a specified wait channel.
  */
-int
+void
 sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
 {
 	struct sleepqueue *sq;
@@ -993,18 +992,18 @@ sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
 	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
 	sq = sleepq_lookup(wchan);
-	if (sq == NULL)
-		return (0);
-	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
-	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
+	if (sq != NULL) {
+		KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
+		    ("%s: mismatch between sleep/wakeup and cv_*", __func__));
-	return (sleepq_remove_matching(sq, queue, match_any, pri));
+		sleepq_remove_matching(sq, queue, match_any, pri);
+	}
 }
 /*
  * Resume threads on the sleep queue that match the given predicate.
  */
-int
+void
 sleepq_remove_matching(struct sleepqueue *sq, int queue,
     bool (*matches)(struct thread *), int pri)
 {
@@ -1020,8 +1019,6 @@ sleepq_remove_matching(struct sleepqueue *sq, int queue,
 		if (matches(td))
 			sleepq_resume_thread(sq, td, pri, SRQ_HOLD);
 	}
-	return (0);
 }
 /*
@@ -1113,7 +1110,7 @@ sleepq_remove(struct thread *td, const void *wchan)
  *
  * Requires thread lock on entry, releases on return.
  */
-int
+void
 sleepq_abort(struct thread *td, int intrval)
 {
 	struct sleepqueue *sq;
@@ -1131,7 +1128,7 @@ sleepq_abort(struct thread *td, int intrval)
 	 */
 	if (td->td_flags & TDF_TIMEOUT) {
 		thread_unlock(td);
-		return (0);
+		return;
 	}
 	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
@@ -1145,7 +1142,7 @@ sleepq_abort(struct thread *td, int intrval)
 	 */
 	if (!TD_IS_SLEEPING(td)) {
 		thread_unlock(td);
-		return (0);
+		return;
 	}
 	wchan = td->td_wchan;
 	MPASS(wchan != NULL);
@@ -1154,7 +1151,6 @@ sleepq_abort(struct thread *td, int intrval)
 	/* Thread is asleep on sleep queue sq, so wake it up. */
 	sleepq_resume_thread(sq, td, 0, 0);
-	return (0);
 }
 void

View file

@@ -1161,7 +1161,6 @@ void kqtimer_proc_continue(struct proc *p);
 void kern_proc_vmmap_resident(struct vm_map *map, struct vm_map_entry *entry,
     int *resident_count, bool *super);
 void kern_yield(int);
-void kick_proc0(void);
 void killjobc(void);
 int leavepgrp(struct proc *p);
 int maybe_preempt(struct thread *td);

View file

@@ -86,21 +86,21 @@ struct thread;
 #define SLEEPQ_DROP 0x400 /* Return without lock held. */
 void init_sleepqueues(void);
-int sleepq_abort(struct thread *td, int intrval);
+void sleepq_abort(struct thread *td, int intrval);
 void sleepq_add(const void *wchan, struct lock_object *lock,
     const char *wmesg, int flags, int queue);
 struct sleepqueue *sleepq_alloc(void);
-int sleepq_broadcast(const void *wchan, int flags, int pri, int queue);
+void sleepq_broadcast(const void *wchan, int flags, int pri, int queue);
 void sleepq_chains_remove_matching(bool (*matches)(struct thread *));
 void sleepq_free(struct sleepqueue *sq);
 void sleepq_lock(const void *wchan);
 struct sleepqueue *sleepq_lookup(const void *wchan);
 void sleepq_release(const void *wchan);
 void sleepq_remove(struct thread *td, const void *wchan);
-int sleepq_remove_matching(struct sleepqueue *sq, int queue,
+void sleepq_remove_matching(struct sleepqueue *sq, int queue,
     bool (*matches)(struct thread *), int pri);
 void sleepq_remove_nested(struct thread *td);
-int sleepq_signal(const void *wchan, int flags, int pri, int queue);
+void sleepq_signal(const void *wchan, int flags, int pri, int queue);
 void sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt,
     sbintime_t pr, int flags);
 #define sleepq_set_timeout(wchan, timo) \

View file

@@ -836,11 +836,3 @@ vm_waitproc(struct proc *p)
 	vmspace_exitfree(p); /* and clean-out the vmspace */
 }
-/*
- * This used to kick the thread which faults in threads.
- */
-void
-kick_proc0(void)
-{
-}