- Introduce a new flag, KEF_HOLD, that prevents sched_add() from doing a
  migration.  Use it in sched_prio() and sched_switch() to stop us from
  migrating threads that are in short-term sleeps or are runnable.  These
  extra migrations were added in the patches to support KSE.  (A minimal
  sketch of the hold-and-consume pattern follows this list.)
- Only set NEEDRESCHED if the thread we're adding in sched_add() has a
  numerically lower (i.e. more urgent) priority than curthread and is
  being placed on the current queue.  (A sketch of this guard follows the
  diff below.)
- Fix some minor whitespace problems.
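
The hold-and-consume pattern is small enough to show in isolation.  The
following is a minimal, self-contained sketch of the idea, not the kernel
code: struct kse, can_migrate(), and the flag value here are hypothetical
stand-ins.  A path that is about to re-enqueue a thread sets the hold flag,
and the enqueue side consumes it exactly once, so only that single
sched_add() pass is pinned to its current cpu.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the KEF_HOLD bit in ke_flags. */
#define	KEF_HOLD	0x08

struct kse {
	int	ke_flags;		/* hypothetical; mirrors ke->ke_flags */
};

/*
 * Sketch of the migration decision in sched_add(): a held KSE may not
 * migrate, and the flag is cleared on the spot so the hold lasts for
 * this one enqueue only.
 */
static bool
can_migrate(struct kse *ke)
{
	bool canmigrate = true;		/* assume KSE_CAN_MIGRATE() said yes */

	if (ke->ke_flags & KEF_HOLD) {
		ke->ke_flags &= ~KEF_HOLD;	/* consume the one-shot hold */
		canmigrate = false;
	}
	return (canmigrate);
}

int
main(void)
{
	struct kse ke = { .ke_flags = 0 };

	ke.ke_flags |= KEF_HOLD;	/* as sched_prio()/sched_switch() do */
	printf("held add: canmigrate=%d\n", can_migrate(&ke));	/* 0 */
	printf("next add: canmigrate=%d\n", can_migrate(&ke));	/* 1 */
	return (0);
}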
Jeff Roberson 2004-08-12 07:56:33 +00:00
parent 43ae335011
commit f2b74cbf28
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=133555

sys/kern/sched_ule.c

@@ -101,6 +101,7 @@ struct ke_sched {
 #define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
 #define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */
 #define	KEF_XFERABLE	KEF_SCHED2	/* KSE was added as transferable. */
+#define	KEF_HOLD	KEF_SCHED3	/* KSE is temporarily bound. */
 
 struct kg_sched {
 	int	skg_slptime;	/* Number of ticks we vol. slept */
@@ -1143,6 +1144,12 @@ sched_prio(struct thread *td, u_char prio)
 			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
 			runq_add(ke->ke_runq, ke);
 		}
+		/*
+		 * Hold this kse on this cpu so that sched_prio() doesn't
+		 * cause excessive migration.  We only want migration to
+		 * happen as the result of a wakeup.
+		 */
+		ke->ke_flags |= KEF_HOLD;
 		adjustrunqueue(td, prio);
 	} else
 		td->td_priority = prio;
@@ -1172,6 +1179,10 @@ sched_switch(struct thread *td, struct thread *newtd)
 		TD_SET_CAN_RUN(td);
 	} else if (TD_IS_RUNNING(td)) {
 		kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
+		/*
+		 * Don't allow the kse to migrate from a preemption.
+		 */
+		ke->ke_flags |= KEF_HOLD;
 		setrunqueue(td);
 	} else {
 		if (ke->ke_runq) {
@@ -1294,7 +1305,6 @@ sched_fork(struct thread *td, struct proc *p1)
 void
 sched_fork_kse(struct thread *td, struct kse *child)
 {
-	struct kse *ke = td->td_kse;
 
 	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
@@ -1642,9 +1652,10 @@ sched_add_internal(struct thread *td, int preemptive)
 	 * to do it.
 	 */
 	canmigrate = KSE_CAN_MIGRATE(ke, class);
-	if (TD_IS_RUNNING(td))
+	if (ke->ke_flags & KEF_HOLD) {
+		ke->ke_flags &= ~KEF_HOLD;
 		canmigrate = 0;
+	}
 	/*
 	 * If this thread is pinned or bound, notify the target cpu.
 	 */
@@ -1678,7 +1689,8 @@ sched_add_internal(struct thread *td, int preemptive)
 	/*
 	 * XXX With preemption this is not necessary.
 	 */
-	if (td->td_priority < curthread->td_priority)
+	if (td->td_priority < curthread->td_priority &&
+	    ke->ke_runq == kseq->ksq_curr)
 		curthread->td_flags |= TDF_NEEDRESCHED;
 	if (preemptive && maybe_preempt(td))
 		return;
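
As a reading aid for the NEEDRESCHED hunk above (a hypothetical helper, not
kernel code): in FreeBSD a numerically lower td_priority is a more urgent
priority, and ksq_curr is the queue the cpu is draining this round, so the
reschedule hint is only raised when the newly added thread could actually
run soon on this cpu.

#include <stdbool.h>

/* Hypothetical stand-ins; lower td_priority value = more urgent. */
struct thread {
	unsigned char	td_priority;
};
struct runq {
	int	rq_id;
};

/*
 * Sketch of the tightened test: reschedule only when the added thread
 * outranks the running one AND landed on the queue currently being
 * drained (ksq_curr), not on the next-round queue.
 */
static bool
should_set_needresched(const struct thread *added,
    const struct thread *running, const struct runq *placed_on,
    const struct runq *ksq_curr)
{
	return (added->td_priority < running->td_priority &&
	    placed_on == ksq_curr);
}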