sched/core: Use PELT for scale_rt_capacity()
The utilization of the CPU by RT, DL and IRQs is now tracked with PELT,
so we can use these metrics instead of rt_avg to evaluate the remaining
capacity available for the CFS class.

The behavior of scale_rt_capacity() has changed: it now returns the
remaining capacity available for CFS instead of a scaling factor,
because RT, DL and IRQ now provide absolute utilization values.

The same formula as schedutil is used:

  irq util_avg + (1 - irq util_avg / max capacity) * \Sum rq util_avg

but the implementation is different because it doesn't return the same
value and doesn't benefit from the same optimization.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/1530200714-4504-10-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
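To make the new behavior concrete, here is a minimal userspace sketch of
the arithmetic scale_rt_capacity() now performs (the helper name and the
sample numbers are illustrative, not part of the patch):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/*
 * Model of the new scale_rt_capacity(): the capacity left for CFS is
 * (max - rt - dl), scaled by the fraction of capacity not consumed by
 * IRQs, i.e. (max - irq) / max.
 */
static unsigned long remaining_cfs_capacity(unsigned long max,
					    unsigned long irq,
					    unsigned long rt,
					    unsigned long dl)
{
	unsigned long used, free;

	if (irq >= max)			/* IRQs consume everything */
		return 1;

	used = rt + dl;
	if (used >= max)		/* RT + DL consume everything */
		return 1;

	free = max - used;
	free *= (max - irq);
	free /= max;

	return free;
}

int main(void)
{
	/*
	 * max = 1024, irq = 100, rt = 150, dl = 50:
	 * (1024 - 200) * (1024 - 100) / 1024 = 743
	 */
	printf("%lu\n", remaining_cfs_capacity(SCHED_CAPACITY_SCALE, 100, 150, 50));
	return 0;
}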
parent dfa444dc2f
commit 523e979d31
4 changed files with 23 additions and 27 deletions
kernel/sched/deadline.c
@@ -1180,8 +1180,6 @@ static void update_curr_dl(struct rq *rq)
 	curr->se.exec_start = now;
 	cgroup_account_cputime(curr, delta_exec);
 
-	sched_rt_avg_update(rq, delta_exec);
-
 	if (dl_entity_is_special(dl_se))
 		return;
 
kernel/sched/fair.c
@@ -7551,39 +7551,39 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 static unsigned long scale_rt_capacity(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	u64 total, used, age_stamp, avg;
-	s64 delta;
+	unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+	unsigned long used, free;
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	unsigned long irq;
+#endif
 
-	/*
-	 * Since we're reading these variables without serialization make sure
-	 * we read them once before doing sanity checks on them.
-	 */
-	age_stamp = READ_ONCE(rq->age_stamp);
-	avg = READ_ONCE(rq->rt_avg);
-	delta = __rq_clock_broken(rq) - age_stamp;
-
-	if (unlikely(delta < 0))
-		delta = 0;
-
-	total = sched_avg_period() + delta;
-
-	used = div_u64(avg, total);
-
-	if (likely(used < SCHED_CAPACITY_SCALE))
-		return SCHED_CAPACITY_SCALE - used;
-
-	return 1;
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	irq = READ_ONCE(rq->avg_irq.util_avg);
+
+	if (unlikely(irq >= max))
+		return 1;
+#endif
+
+	used = READ_ONCE(rq->avg_rt.util_avg);
+	used += READ_ONCE(rq->avg_dl.util_avg);
+
+	if (unlikely(used >= max))
+		return 1;
+
+	free = max - used;
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	free *= (max - irq);
+	free /= max;
+#endif
+	return free;
 }
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
+	unsigned long capacity = scale_rt_capacity(cpu);
 	struct sched_group *sdg = sd->groups;
 
-	cpu_rq(cpu)->cpu_capacity_orig = capacity;
-
-	capacity *= scale_rt_capacity(cpu);
-	capacity >>= SCHED_CAPACITY_SHIFT;
+	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
 
 	if (!capacity)
 		capacity = 1;
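A note on "doesn't return the same value": algebraically, the free
capacity computed above is the exact complement of the schedutil-style
aggregate, i.e. (max - rt - dl) * (max - irq) / max equals
max - (irq + (max - irq) / max * (rt + dl)), taking rt + dl as the
\Sum rq util_avg term; modulo integer rounding, the two implementations
differ only in which quantity they return. A small standalone check of
that identity, with made-up sample values and exact (floating-point)
arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Sample values: max capacity and per-class utilizations. */
	double max = 1024.0, irq = 100.0, rt = 150.0, dl = 50.0;

	/* scale_rt_capacity() form: capacity left for CFS. */
	double free_cap = (max - rt - dl) * (max - irq) / max;

	/* schedutil form: aggregated non-CFS utilization. */
	double util = irq + (max - irq) * (rt + dl) / max;

	/* The two are complements with respect to max. */
	printf("free = %.5f, max - util = %.5f\n", free_cap, max - util);
	assert(free_cap == max - util);
	return 0;
}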
kernel/sched/pelt.c
@@ -237,7 +237,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runna
 	 */
 	sa->load_avg = div_u64(load * sa->load_sum, divider);
 	sa->runnable_load_avg = div_u64(runnable * sa->runnable_load_sum, divider);
-	sa->util_avg = sa->util_sum / divider;
+	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
 }
 
 /*
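The pelt.c hunk pairs with the new lockless readers: scale_rt_capacity()
now samples util_avg with READ_ONCE(), possibly from another CPU, so the
updater publishes it with WRITE_ONCE(). A rough userspace illustration of
the pattern, with simplified stand-ins for the kernel macros:

#include <stdio.h>

/* Simplified stand-ins for the kernel's one-time-access macros. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct sched_avg { unsigned long util_avg; };

int main(void)
{
	struct sched_avg sa = { 0 };

	/* Writer (___update_load_avg): publish in a single store. */
	WRITE_ONCE(sa.util_avg, 280);

	/* Reader (scale_rt_capacity): sample in a single load, so a
	 * concurrent update cannot be torn or re-read inconsistently. */
	unsigned long used = READ_ONCE(sa.util_avg);

	printf("util_avg = %lu\n", used);
	return 0;
}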
kernel/sched/rt.c
@@ -973,8 +973,6 @@ static void update_curr_rt(struct rq *rq)
 	curr->se.exec_start = now;
 	cgroup_account_cputime(curr, delta_exec);
 
-	sched_rt_avg_update(rq, delta_exec);
-
 	if (!rt_bandwidth_enabled())
 		return;
 