mirror of
https://github.com/torvalds/linux
synced 2024-10-06 19:34:19 +00:00
sched/fair: Simplify the continue_balancing logic in sched_balance_newidle()
newidle (CPU_NEWLY_IDLE) balancing doesn't stop the load balancing when the continue_balancing flag is reset, but the other two balancing cases (IDLE, BUSY) do. newidle balancing stops the load balancing if the rq has a task or there is a wakeup pending. The same checks are already present in should_we_balance() for the newidle case. Hence use its return value and simplify the continue_balancing mechanism for newidle. Update the surrounding comment as well. No change in functionality intended. Signed-off-by: Shrikanth Hegde <sshegde@linux.ibm.com> Signed-off-by: Ingo Molnar <mingo@kernel.org> Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Link: https://lore.kernel.org/r/20240325153926.274284-1-sshegde@linux.ibm.com
This commit is contained in:
parent
d0f5d3cefc
commit
c829d6818b
|
@ -12358,6 +12358,7 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
|
|||
{
|
||||
unsigned long next_balance = jiffies + HZ;
|
||||
int this_cpu = this_rq->cpu;
|
||||
int continue_balancing = 1;
|
||||
u64 t0, t1, curr_cost = 0;
|
||||
struct sched_domain *sd;
|
||||
int pulled_task = 0;
|
||||
|
@ -12372,8 +12373,9 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
|
|||
return 0;
|
||||
|
||||
/*
|
||||
* We must set idle_stamp _before_ calling idle_balance(), such that we
|
||||
* measure the duration of idle_balance() as idle time.
|
||||
* We must set idle_stamp _before_ calling sched_balance_rq()
|
||||
* for CPU_NEWLY_IDLE, such that we measure the this duration
|
||||
* as idle time.
|
||||
*/
|
||||
this_rq->idle_stamp = rq_clock(this_rq);
|
||||
|
||||
|
@ -12412,7 +12414,6 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
|
|||
|
||||
rcu_read_lock();
|
||||
for_each_domain(this_cpu, sd) {
|
||||
int continue_balancing = 1;
|
||||
u64 domain_cost;
|
||||
|
||||
update_next_balance(sd, &next_balance);
|
||||
|
@ -12438,8 +12439,7 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
|
|||
* Stop searching for tasks to pull if there are
|
||||
* now runnable tasks on this rq.
|
||||
*/
|
||||
if (pulled_task || this_rq->nr_running > 0 ||
|
||||
this_rq->ttwu_pending)
|
||||
if (pulled_task || !continue_balancing)
|
||||
break;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
|
Loading…
Reference in a new issue