Actually, for signal and thread suspension, the extra process spin lock is
unnecessary; the normal process lock and thread lock are enough. The spin
lock is still needed for process and thread exiting, to mimic the single
sched_lock.
David Xu 2008-10-23 07:55:38 +00:00
parent b1efbd6b47
commit 3f9be10eb0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=184199
6 changed files with 22 additions and 61 deletions
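
The shape of the change can be sketched in isolation before the diffs. The fragment below is a user-space analogue, not kernel code: proc_mtx stands in for PROC_LOCK, proc_slock for PROC_SLOCK, and suspcount for p_suspcount, all invented names. The point is simply that a counter whose every reader and writer already holds the process lock gains nothing from also being wrapped in the spin lock.

/*
 * Illustrative user-space analogue of the locking change; not kernel code.
 * proc_mtx plays the role of PROC_LOCK, proc_slock the role of PROC_SLOCK,
 * and suspcount the role of p_suspcount.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t proc_slock;
static int suspcount;
static int numthreads = 4;

/* Old style: wrap the counter in the spin lock even though proc_mtx is held. */
static void
suspend_one_old(void)
{
        pthread_mutex_lock(&proc_mtx);
        pthread_spin_lock(&proc_slock);         /* redundant second lock */
        suspcount++;
        if (suspcount == numthreads)
                printf("old: all threads suspended\n");
        pthread_spin_unlock(&proc_slock);
        pthread_mutex_unlock(&proc_mtx);
}

/*
 * New style: every path that touches suspcount already holds proc_mtx,
 * so the mutex alone is enough and the spin lock is dropped.
 */
static void
suspend_one_new(void)
{
        pthread_mutex_lock(&proc_mtx);
        suspcount++;
        if (suspcount == numthreads)
                printf("new: all threads suspended\n");
        pthread_mutex_unlock(&proc_mtx);
}

int
main(void)
{
        pthread_spin_init(&proc_slock, PTHREAD_PROCESS_PRIVATE);
        suspend_one_old();
        suspend_one_old();
        suspend_one_new();
        suspend_one_new();
        printf("suspcount = %d of %d\n", suspcount, numthreads);
        pthread_spin_destroy(&proc_slock);
        return (0);
}

Built with cc -pthread, both variants behave identically; the diffs below mostly delete the acquire/release pairs on paths that already run under PROC_LOCK, and keep the spin lock where exiting still wants it.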

sys/kern/kern_sig.c

@@ -2115,19 +2115,15 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* Otherwise, process goes back to sleep state.
*/
p->p_flag &= ~P_STOPPED_SIG;
PROC_SLOCK(p);
if (p->p_numthreads == p->p_suspcount) {
PROC_SUNLOCK(p);
p->p_flag |= P_CONTINUED;
p->p_xstat = SIGCONT;
PROC_LOCK(p->p_pptr);
childproc_continued(p);
PROC_UNLOCK(p->p_pptr);
PROC_SLOCK(p);
}
if (action == SIG_DFL) {
thread_unsuspend(p);
PROC_SUNLOCK(p);
sigqueue_delete(sigqueue, sig);
goto out;
}
@@ -2136,14 +2132,12 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* The process wants to catch it so it needs
* to run at least one thread, but which one?
*/
PROC_SUNLOCK(p);
goto runfast;
}
/*
* The signal is not ignored or caught.
*/
thread_unsuspend(p);
PROC_SUNLOCK(p);
goto out;
}
@@ -2167,12 +2161,10 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* It may run a bit until it hits a thread_suspend_check().
*/
wakeup_swapper = 0;
PROC_SLOCK(p);
thread_lock(td);
if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR))
wakeup_swapper = sleepq_abort(td, intrval);
thread_unlock(td);
PROC_SUNLOCK(p);
if (wakeup_swapper)
kick_proc0();
goto out;
@@ -2193,7 +2185,6 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
goto out;
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
PROC_SLOCK(p);
sig_suspend_threads(td, p, 1);
if (p->p_numthreads == p->p_suspcount) {
/*
@@ -2204,10 +2195,8 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
* should never be equal to p_suspcount.
*/
thread_stopped(p);
PROC_SUNLOCK(p);
sigqueue_delete_proc(p, p->p_xstat);
} else
PROC_SUNLOCK(p);
}
goto out;
}
} else {
@@ -2222,12 +2211,8 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
*/
runfast:
tdsigwakeup(td, sig, action, intrval);
PROC_SLOCK(p);
thread_unsuspend(p);
PROC_SUNLOCK(p);
out:
/* If we jump here, proc slock should not be owned. */
PROC_SLOCK_ASSERT(p, MA_NOTOWNED);
return (ret);
}
@@ -2247,7 +2232,6 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
PROC_LOCK_ASSERT(p, MA_OWNED);
prop = sigprop(sig);
PROC_SLOCK(p);
thread_lock(td);
/*
* Bring the priority of a thread up if we want it to get
@@ -2271,7 +2255,6 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
*/
if ((prop & SA_CONT) && action == SIG_DFL) {
thread_unlock(td);
PROC_SUNLOCK(p);
sigqueue_delete(&p->p_sigqueue, sig);
/*
* It may be on either list in this state.
@@ -2300,7 +2283,6 @@ tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval)
#endif
}
out:
PROC_SUNLOCK(p);
thread_unlock(td);
if (wakeup_swapper)
kick_proc0();
@@ -2312,7 +2294,6 @@ sig_suspend_threads(struct thread *td, struct proc *p, int sending)
struct thread *td2;
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
FOREACH_THREAD_IN_PROC(p, td2) {
thread_lock(td2);
@@ -2344,11 +2325,9 @@ ptracestop(struct thread *td, int sig)
td->td_dbgflags |= TDB_XSIG;
td->td_xsig = sig;
PROC_SLOCK(p);
while ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_XSIG)) {
if (p->p_flag & P_SINGLE_EXIT) {
td->td_dbgflags &= ~TDB_XSIG;
PROC_SUNLOCK(p);
return (sig);
}
/*
@@ -2370,7 +2349,6 @@ ptracestop(struct thread *td, int sig)
goto stopme;
}
}
PROC_SUNLOCK(p);
return (td->td_xsig);
}
@@ -2511,10 +2489,8 @@ issignal(td)
&p->p_mtx.lock_object, "Catching SIGSTOP");
p->p_flag |= P_STOPPED_SIG;
p->p_xstat = sig;
PROC_SLOCK(p);
sig_suspend_threads(td, p, 0);
thread_suspend_switch(td);
PROC_SUNLOCK(p);
mtx_lock(&ps->ps_mtx);
break;
} else if (prop & SA_IGNORE) {
@@ -2556,18 +2532,15 @@ thread_stopped(struct proc *p)
int n;
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
n = p->p_suspcount;
if (p == curproc)
n++;
if ((p->p_flag & P_STOPPED_SIG) && (n == p->p_numthreads)) {
PROC_SUNLOCK(p);
p->p_flag &= ~P_WAITED;
PROC_LOCK(p->p_pptr);
childproc_stopped(p, (p->p_flag & P_TRACED) ?
CLD_TRAPPED : CLD_STOPPED);
PROC_UNLOCK(p->p_pptr);
PROC_SLOCK(p);
}
}

sys/kern/kern_thr.c

@@ -283,7 +283,6 @@ thr_exit(struct thread *td, struct thr_exit_args *uap)
PROC_LOCK(p);
sigqueue_flush(&td->td_sigqueue);
PROC_SLOCK(p);
/*
* Shutting down last thread in the proc. This will actually
@@ -291,10 +290,10 @@ thr_exit(struct thread *td, struct thr_exit_args *uap)
*/
if (p->p_numthreads != 1) {
thread_stopped(p);
PROC_SLOCK(p);
thread_exit();
/* NOTREACHED */
}
PROC_SUNLOCK(p);
PROC_UNLOCK(p);
return (0);
}

sys/kern/kern_thread.c

@@ -543,7 +543,6 @@ thread_single(int mode)
p->p_flag &= ~P_SINGLE_BOUNDARY;
}
p->p_flag |= P_STOPPED_SINGLE;
PROC_SLOCK(p);
p->p_singlethread = td;
if (mode == SINGLE_EXIT)
remaining = p->p_numthreads;
@@ -642,7 +641,6 @@ thread_single(int mode)
p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
thread_unthread(td);
}
PROC_SUNLOCK(p);
return (0);
}
@@ -716,15 +714,16 @@ thread_suspend_check(int return_instead)
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
sigqueue_flush(&td->td_sigqueue);
PROC_SLOCK(p);
thread_stopped(p);
/*
* If the process is waiting for us to exit,
* this thread should just suicide.
* Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
*/
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
PROC_SLOCK(p);
thread_exit();
}
if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
if (p->p_numthreads == p->p_suspcount + 1) {
thread_lock(p->p_singlethread);
@@ -735,8 +734,8 @@ thread_suspend_check(int return_instead)
kick_proc0();
}
}
PROC_UNLOCK(p);
thread_lock(td);
PROC_UNLOCK(p);
/*
* When a thread suspends, it just
* gets taken off all queues.
@@ -746,7 +745,6 @@ thread_suspend_check(int return_instead)
p->p_boundary_count++;
td->td_flags |= TDF_BOUNDARY;
}
PROC_SUNLOCK(p);
mi_switch(SW_INVOL | SWT_SUSPEND, NULL);
if (return_instead == 0)
td->td_flags &= ~TDF_BOUNDARY;
@@ -766,25 +764,22 @@ thread_suspend_switch(struct thread *td)
p = td->td_proc;
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
/*
* We implement thread_suspend_one in stages here to avoid
* dropping the proc lock while the thread lock is owned.
*/
thread_stopped(p);
p->p_suspcount++;
PROC_UNLOCK(p);
thread_lock(td);
PROC_UNLOCK(p);
td->td_flags &= ~TDF_NEEDSUSPCHK;
TD_SET_SUSPENDED(td);
sched_sleep(td, 0);
PROC_SUNLOCK(p);
DROP_GIANT();
mi_switch(SW_VOL | SWT_SUSPEND, NULL);
thread_unlock(td);
PICKUP_GIANT();
PROC_LOCK(p);
PROC_SLOCK(p);
}
void
@@ -792,7 +787,6 @@ thread_suspend_one(struct thread *td)
{
struct proc *p = td->td_proc;
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
p->p_suspcount++;
@@ -806,7 +800,6 @@ thread_unsuspend_one(struct thread *td)
{
struct proc *p = td->td_proc;
PROC_SLOCK_ASSERT(p, MA_OWNED);
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
TD_CLR_SUSPENDED(td);
@@ -824,7 +817,6 @@ thread_unsuspend(struct proc *p)
int wakeup_swapper;
PROC_LOCK_ASSERT(p, MA_OWNED);
PROC_SLOCK_ASSERT(p, MA_OWNED);
wakeup_swapper = 0;
if (!P_SHOULDSTOP(p)) {
FOREACH_THREAD_IN_PROC(p, td) {
@@ -863,7 +855,6 @@ thread_single_end(void)
p = td->td_proc;
PROC_LOCK_ASSERT(p, MA_OWNED);
p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
PROC_SLOCK(p);
p->p_singlethread = NULL;
wakeup_swapper = 0;
/*
@@ -881,7 +872,6 @@ thread_single_end(void)
thread_unlock(td);
}
}
PROC_SUNLOCK(p);
if (wakeup_swapper)
kick_proc0();
}
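
A detail worth noting in the thread_suspend_check() and thread_suspend_switch() hunks above: the reordered PROC_UNLOCK()/thread_lock() lines suggest the thread lock is now taken before the process lock is dropped, so the suspension state is handed from one lock to the other with no window in which neither is held. A rough user-space sketch of that hand-over, with invented names (proc_mtx, thread_mtx, suspcount, td_suspended) and ordinary pthread mutexes in place of the kernel's sleep mutex and thread spin lock:

/*
 * User-space sketch of the lock hand-over: take the second lock before
 * releasing the first, so the state being published is always covered
 * by at least one of them.  Not kernel code; all names are invented.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t proc_mtx = PTHREAD_MUTEX_INITIALIZER;   /* "PROC_LOCK"   */
static pthread_mutex_t thread_mtx = PTHREAD_MUTEX_INITIALIZER; /* "thread_lock" */
static int suspcount;           /* process-wide count, protected by proc_mtx   */
static int td_suspended;        /* per-thread state, protected by thread_mtx   */

static void
suspend_self(void)
{
        pthread_mutex_lock(&proc_mtx);
        suspcount++;                        /* accounting under the proc lock   */
        pthread_mutex_lock(&thread_mtx);    /* grab the thread lock first ...   */
        pthread_mutex_unlock(&proc_mtx);    /* ... then let go of the proc lock */
        td_suspended = 1;                   /* still covered by thread_mtx      */
        pthread_mutex_unlock(&thread_mtx);  /* mi_switch() would happen here    */
}

int
main(void)
{
        suspend_self();
        printf("suspcount=%d td_suspended=%d\n", suspcount, td_suspended);
        return (0);
}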

sys/kern/subr_sleepqueue.c

@@ -395,6 +395,8 @@ sleepq_catch_signals(void *wchan, int pri)
sleepq_switch(wchan, pri);
return (0);
}
catch_sig:
thread_unlock(td);
mtx_unlock_spin(&sc->sc_lock);
CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
@@ -414,20 +416,19 @@ sleepq_catch_signals(void *wchan, int pri)
ret = ERESTART;
mtx_unlock(&ps->ps_mtx);
}
/*
* Lock the per-process spinlock prior to dropping the PROC_LOCK
* to avoid a signal delivery race. PROC_LOCK, PROC_SLOCK, and
* thread_lock() are currently held in tdsignal().
*/
PROC_SLOCK(p);
mtx_lock_spin(&sc->sc_lock);
PROC_UNLOCK(p);
mtx_lock_spin(&sc->sc_lock);
thread_lock(td);
PROC_SUNLOCK(p);
if (ret == 0) {
sleepq_switch(wchan, pri);
return (0);
}
if (ret != 0)
goto out;
if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0)
goto catch_sig;
sleepq_switch(wchan, pri);
return (0);
out:
/*
* There were pending signals and this thread is still
* on the sleep queue, remove it from the sleep queue.
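
With the per-process spin lock gone from this path, the delivery race that the deleted comment described is closed differently: after the sleep-queue lock and thread lock are retaken, TDF_NEEDSIGCHK and TDF_NEEDSUSPCHK are tested again, and the code jumps back to the new catch_sig label if either was set while the locks were dropped. A minimal user-space sketch of that recheck-and-retry idea, using an invented need_sig_check flag and a loop where the kernel uses goto:

/*
 * User-space sketch of the recheck-and-retry pattern; not kernel code.
 * need_sig_check stands in for TDF_NEEDSIGCHK, td_lock for thread_lock().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t td_lock = PTHREAD_MUTEX_INITIALIZER;
static bool need_sig_check = true;      /* set by a hypothetical signal sender */

static void
prepare_to_sleep(void)
{
        pthread_mutex_lock(&td_lock);
        while (need_sig_check) {
                need_sig_check = false;
                pthread_mutex_unlock(&td_lock);
                /*
                 * Signal processing runs here without td_lock held; a sender
                 * may set need_sig_check again in this window, which is
                 * exactly the race the recheck below catches.
                 */
                pthread_mutex_lock(&td_lock);
        }
        printf("no new request arrived, ok to block\n"); /* sleepq_switch() */
        pthread_mutex_unlock(&td_lock);
}

int
main(void)
{
        prepare_to_sleep();
        return (0);
}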

sys/kern/sys_process.c

@@ -795,10 +795,8 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
* you should use PT_SUSPEND to suspend it before
* continuing process.
*/
PROC_SLOCK(p);
p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
thread_unsuspend(p);
PROC_SUNLOCK(p);
} else {
if (data)
psignal(p, data);

sys/sys/proc.h

@@ -500,8 +500,8 @@ struct proc {
u_char p_pfsflags; /* (c) Procfs flags. */
struct nlminfo *p_nlminfo; /* (?) Only used by/for lockd. */
struct kaioinfo *p_aioinfo; /* (c) ASYNC I/O info. */
struct thread *p_singlethread;/* (c + j) If single threading this is it */
int p_suspcount; /* (j) Num threads in suspended mode. */
struct thread *p_singlethread;/* (c) If single threading this is it */
int p_suspcount; /* (c) Num threads in suspended mode. */
struct thread *p_xthread; /* (c) Trap thread */
int p_boundary_count;/* (c) Num threads at user boundary */
int p_pendingcnt; /* how many signals are pending */