sched/numa: Export info needed for NUMA balancing on complex topologies
Export some information that is necessary to do placement of
tasks on systems with multi-level NUMA topologies.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: mgorman@suse.de
Cc: chegu_vinod@hp.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1413530994-9732-2-git-send-email-riel@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f3a7e1a9c4
commit 9942f79baa

2 changed files with 8 additions and 1 deletion
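For context on what "multi-level" means here: on a glueless or backplane-based machine the firmware distance table reports more than the usual two values, and sched_init_numa() builds one scheduler level per unique distance. The following note is an illustration of the values this patch exports, assuming a hypothetical 8-node, two-hop machine; the numbers are not from this commit.

	/*
	 * Hypothetical 8-node, two-hop machine (illustrative only):
	 *
	 *   node_distance(i, j): 10 (local), 16 (one hop), 22 (two hops)
	 *
	 * sched_init_numa() collects the unique distances in
	 * sched_domains_numa_distance[], smallest to largest, so the
	 * new assignment in sched_init_numa() below ends up holding
	 * the worst-case distance on the machine:
	 *
	 *   sched_max_numa_distance == 22
	 *
	 * and the newly exported find_numa_distance() reports whether
	 * a raw distance value matches one of those levels:
	 * find_numa_distance(16) is true, find_numa_distance(17) is
	 * false.
	 */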
kernel/sched/core.c:

@@ -6129,6 +6129,7 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 #ifdef CONFIG_NUMA
 static int sched_domains_numa_levels;
 static int *sched_domains_numa_distance;
+int sched_max_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 #endif
@@ -6300,7 +6301,7 @@ static void sched_numa_warn(const char *str)
 	printk(KERN_WARNING "\n");
 }
 
-static bool find_numa_distance(int distance)
+bool find_numa_distance(int distance)
 {
 	int i;
 
@@ -6447,6 +6448,7 @@ static void sched_init_numa(void)
 	sched_domain_topology = tl;
 
 	sched_domains_numa_levels = level;
+	sched_max_numa_distance = sched_domains_numa_distance[level - 1];
 }
 
 static void sched_domains_numa_masks_set(int cpu)

kernel/sched/sched.h:

@@ -678,6 +678,11 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
+#ifdef CONFIG_NUMA
+extern int sched_max_numa_distance;
+extern bool find_numa_distance(int distance);
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 extern void sched_setnuma(struct task_struct *p, int node);
 extern int migrate_task_to(struct task_struct *p, int cpu);
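The diff itself only exports the two symbols; to see why that matters, here is a hedged sketch of the kind of consumer the follow-up NUMA balancing patches add in kernel/sched/fair.c. The helper name hypothetical_scale_by_distance() is an illustrative assumption, not the actual follow-up code, though the normalization mirrors how the later fair.c changes weight fault statistics by node distance.

	/* Illustrative only -- not part of this commit. */
	#include <linux/topology.h>	/* node_distance(), LOCAL_DISTANCE */
	#include "sched.h"		/* the two exports added above */

	/*
	 * Weight a per-node score by how far @nid is from @target:
	 * full weight at local distance, tapering to zero at
	 * sched_max_numa_distance (the worst hop on this machine).
	 */
	static unsigned long hypothetical_scale_by_distance(unsigned long score,
							    int nid, int target)
	{
		int dist = node_distance(nid, target);

		/* Ignore distance values that match no scheduler level. */
		if (!find_numa_distance(dist))
			return 0;

		/* Nodes at the maximum distance contribute nothing. */
		if (dist >= sched_max_numa_distance)
			return 0;

		return score * (sched_max_numa_distance - dist) /
		       (sched_max_numa_distance - LOCAL_DISTANCE);
	}

With the hypothetical 8-node topology above (distances 10/16/22), a score of 100 keeps full weight on the local node, scales to 100 * (22 - 16) / (22 - 10) = 50 one hop away, and drops to 0 two hops away; on a simple two-level machine the exports make the same code degrade gracefully to all-or-nothing weighting.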