sched/fair: Default to false in test_idle_cores()

It is uncertain whether idle cores exist when the shared sched-domain
is not ready, so returning "no idle cores" usually makes sense.

The one exception is __update_idle_core(), which checks the status of
this core and sets the hint in the shared sched-domain if necessary.
The whole logic of that function depends on the existence of the
shared sched-domain, so it can simply bail out early when the domain
is not available.
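
For reference, a sketch of set_idle_cores() as it reads around this
change (paraphrased from kernel/sched/fair.c, not part of the diff
below): when the shared sched-domain is absent, the hint is silently
dropped, so the worst case is a transient, harmless scan of the SMT
mask in __update_idle_core().

  static inline void set_idle_cores(int cpu, int val)
  {
  	struct sched_domain_shared *sds;

  	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
  	if (sds)
  		WRITE_ONCE(sds->has_idle_cores, val);
  	/* No shared sched-domain yet: the hint is silently dropped. */
  }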

This is somewhat tricky, but as Josh suggested, the situation is
transient and only lasts while the domain is not ready. So remove the
caller-supplied default value to make things clearer.
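
With the default removed, the SMT variant of test_idle_cores() boils
down to the following sketch, assembled from the hunks below (the
rcu_dereference() line is unchanged context not shown in this
excerpt):

  static inline bool test_idle_cores(int cpu)
  {
  	struct sched_domain_shared *sds;

  	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
  	if (sds)
  		return READ_ONCE(sds->has_idle_cores);

  	/* Shared sched-domain not ready: assume no idle cores. */
  	return false;
  }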

Signed-off-by: Abel Wu <wuyun.abel@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Josh Don <joshdon@google.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Link: https://lore.kernel.org/r/20220907112000.1854-5-wuyun.abel@bytedance.com
---
@@ -1588,11 +1588,11 @@ numa_type numa_classify(unsigned int imbalance_pct,
 
 #ifdef CONFIG_SCHED_SMT
 /* Forward declarations of select_idle_sibling helpers */
-static inline bool test_idle_cores(int cpu, bool def);
+static inline bool test_idle_cores(int cpu);
 static inline int numa_idle_core(int idle_core, int cpu)
 {
 	if (!static_branch_likely(&sched_smt_present) ||
-	    idle_core >= 0 || !test_idle_cores(cpu, false))
+	    idle_core >= 0 || !test_idle_cores(cpu))
 		return idle_core;
 
 	/*
@@ -6271,7 +6271,7 @@ static inline void set_idle_cores(int cpu, int val)
 		WRITE_ONCE(sds->has_idle_cores, val);
 }
 
-static inline bool test_idle_cores(int cpu, bool def)
+static inline bool test_idle_cores(int cpu)
 {
 	struct sched_domain_shared *sds;
 
@@ -6279,7 +6279,7 @@ static inline bool test_idle_cores(int cpu, bool def)
 	if (sds)
 		return READ_ONCE(sds->has_idle_cores);
 
-	return def;
+	return false;
 }
 
 /*
@@ -6295,7 +6295,7 @@ void __update_idle_core(struct rq *rq)
 	int cpu;
 
 	rcu_read_lock();
-	if (test_idle_cores(core, true))
+	if (test_idle_cores(core))
 		goto unlock;
 
 	for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -6367,9 +6367,9 @@ static inline void set_idle_cores(int cpu, int val)
 {
 }
 
-static inline bool test_idle_cores(int cpu, bool def)
+static inline bool test_idle_cores(int cpu)
 {
-	return def;
+	return false;
 }
 
 static inline int select_idle_core(struct task_struct *p, int core, struct cpumask *cpus, int *idle_cpu)
@@ -6608,7 +6608,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		return target;
 
 	if (sched_smt_active()) {
-		has_idle_core = test_idle_cores(target, false);
+		has_idle_core = test_idle_cores(target);
 		if (!has_idle_core && cpus_share_cache(prev, target)) {
 			i = select_idle_smt(p, prev);