From 08fd6713b2ef5ffeadc34ad38a8c48a568962a9a Mon Sep 17 00:00:00 2001
From: Jeff Roberson
Date: Wed, 15 Oct 2003 07:47:06 +0000
Subject: [PATCH] - If our user_pri doesn't match our actual priority, our
 priority has been elevated, either due to priority propagation or because
 we're in the kernel; in either case, put us on the current queue so that we
 don't stop others from using important resources.  At some point the
 priority elevations from sleeping in the kernel should go away.
 - Remove an optimization in sched_userret().  Before, we would only set
 NEEDRESCHED if there was something of a higher priority available.  This is
 a trivial optimization and it breaks priority propagation because it
 doesn't take threads which we may be blocking into account.  Notice that
 the thread which is blocking others gets up to one tick of cpu time before
 we honor this NEEDRESCHED in sched_clock().

---
 sys/kern/sched_ule.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index 635a0ddf2de6..c0e20446b7bb 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -182,9 +182,8 @@ struct td_sched *thread0_sched = &td_sched;
 #define	SCHED_INTERACTIVE(kg)						\
     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
 #define	SCHED_CURR(kg, ke)						\
-    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE ||			\
-    SCHED_INTERACTIVE(kg) ||						\
-    mtx_ownedby(&Giant, (ke)->ke_thread))
+    (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
+    SCHED_INTERACTIVE(kg))
 
 /*
  * Cpu percentage computation macros and defines.
@@ -1152,14 +1151,21 @@
 void
 sched_userret(struct thread *td)
 {
 	struct ksegrp *kg;
+#if 0
 	struct kseq *kseq;
 	struct kse *ke;
+#endif
 
 	kg = td->td_ksegrp;
 	if (td->td_priority != kg->kg_user_pri) {
 		mtx_lock_spin(&sched_lock);
 		td->td_priority = kg->kg_user_pri;
+		/*
+		 * This optimization is temporarily disabled because it
+		 * breaks priority propagation.
+		 */
+#if 0
 		kseq = KSEQ_SELF();
 		if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
 #ifdef SMP
@@ -1169,6 +1175,7 @@ sched_userret(struct thread *td)
 #endif
 		    (ke = kseq_choose(kseq, 0)) != NULL &&
 		    ke->ke_thread->td_priority < td->td_priority)
+#endif
 			curthread->td_flags |= TDF_NEEDRESCHED;
 		mtx_unlock_spin(&sched_lock);
 	}
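
For illustration, a minimal userland sketch of the queue-placement rule the
new SCHED_CURR() expresses: a thread whose current priority differs from its
kg_user_pri is treated as elevated (priority propagation, or work in the
kernel) and stays on the current queue, as does any interactive ksegrp.  The
struct layouts, the kg_interact field, and the threshold value below are
invented stand-ins for this sketch, not the kernel's definitions.

#include <stdio.h>

/* Invented stand-ins for the kernel's ksegrp/kse; not the real types. */
struct ksegrp {
	int kg_user_pri;	/* priority the thread runs at in userland */
	int kg_interact;	/* stand-in for sched_interact_score(kg) */
};

struct kse {
	int td_priority;	/* current, possibly elevated, priority */
};

#define SCHED_INTERACT_THRESH	30	/* illustrative value only */

/*
 * Mirrors the patched SCHED_CURR(kg, ke): an elevated or interactive
 * thread goes on the current queue, everything else on the next one.
 */
static int
sched_curr(const struct ksegrp *kg, const struct kse *ke)
{
	return (ke->td_priority != kg->kg_user_pri ||
	    kg->kg_interact < SCHED_INTERACT_THRESH);
}

int
main(void)
{
	struct ksegrp kg = { .kg_user_pri = 140, .kg_interact = 60 };
	struct kse elevated = { .td_priority = 90 };	/* lent a waiter's priority */
	struct kse batch = { .td_priority = 140 };	/* running at kg_user_pri */

	printf("elevated thread    -> %s queue\n",
	    sched_curr(&kg, &elevated) ? "current" : "next");
	printf("plain batch thread -> %s queue\n",
	    sched_curr(&kg, &batch) ? "current" : "next");
	return (0);
}

The placement matters for the reason the log message gives: a thread that has
been lent a higher priority is likely blocking others, so parking it on the
next queue would keep those threads from using the resource it holds.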