subr_smp: Clean up topology analysis, add additional layers

Rather than repeatedly nesting loops, separate concerns with a single loop
per call stack level.  Use a table to drive the recursive routine.  Handle
missing topology layers more gracefully (infer a single unit).

Analyze some additional optional layers which may be present on e.g. AMD Zen
systems (groups, aka dies, per package; and cachegroups, aka CCXes, per
group).

Display that additional information in the boot-time topology information,
when it is relevant (non-one).

Reviewed by:	markj@, mjoras@ (earlier version)
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D12019
Conrad Meyer 2017-08-22 00:10:15 +00:00
parent 9b0ddce6bc
commit bb14d5643b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=322776
3 changed files with 131 additions and 107 deletions
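
To make the new announcement format concrete: on a hypothetical two-package
Zen-style machine (four groups/dies per package, two cache groups/CCXes per
group, four cores per cache group, two threads per core; counts invented for
illustration), the printf chain added below would produce a line like

    FreeBSD/SMP: 2 package(s) x 4 groups x 2 cache groups x 4 core(s) x 2 hardware threads

while on a system where the group and cachegroup layers collapse to a single
unit apiece, those terms are suppressed per the non-one rule above and the
line looks as it did before this change.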

--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c

@@ -993,7 +993,7 @@ topo_next_node(struct topo_node *top, struct topo_node *node)
     if ((next = TAILQ_NEXT(node, siblings)) != NULL)
         return (next);
 
-    while ((node = node->parent) != top)
+    while (node != top && (node = node->parent) != top)
         if ((next = TAILQ_NEXT(node, siblings)) != NULL)
             return (next);
@@ -1012,7 +1012,7 @@ topo_next_nonchild_node(struct topo_node *top, struct topo_node *node)
     if ((next = TAILQ_NEXT(node, siblings)) != NULL)
         return (next);
 
-    while ((node = node->parent) != top)
+    while (node != top && (node = node->parent) != top)
         if ((next = TAILQ_NEXT(node, siblings)) != NULL)
             return (next);
@@ -1044,105 +1044,99 @@ topo_set_pu_id(struct topo_node *node, cpuid_t id)
     }
 }
 
+static struct topology_spec {
+    topo_node_type  type;
+    bool            match_subtype;
+    uintptr_t       subtype;
+} topology_level_table[TOPO_LEVEL_COUNT] = {
+    [TOPO_LEVEL_PKG] = { .type = TOPO_TYPE_PKG, },
+    [TOPO_LEVEL_GROUP] = { .type = TOPO_TYPE_GROUP, },
+    [TOPO_LEVEL_CACHEGROUP] = {
+        .type = TOPO_TYPE_CACHE,
+        .match_subtype = true,
+        .subtype = CG_SHARE_L3,
+    },
+    [TOPO_LEVEL_CORE] = { .type = TOPO_TYPE_CORE, },
+    [TOPO_LEVEL_THREAD] = { .type = TOPO_TYPE_PU, },
+};
+
+static bool
+topo_analyze_table(struct topo_node *root, int all, enum topo_level level,
+    struct topo_analysis *results)
+{
+    struct topology_spec *spec;
+    struct topo_node *node;
+    int count;
+
+    if (level >= TOPO_LEVEL_COUNT)
+        return (true);
+
+    spec = &topology_level_table[level];
+    count = 0;
+    node = topo_next_node(root, root);
+
+    while (node != NULL) {
+        if (node->type != spec->type ||
+            (spec->match_subtype && node->subtype != spec->subtype)) {
+            node = topo_next_node(root, node);
+            continue;
+        }
+        if (!all && CPU_EMPTY(&node->cpuset)) {
+            node = topo_next_nonchild_node(root, node);
+            continue;
+        }
+
+        count++;
+
+        if (!topo_analyze_table(node, all, level + 1, results))
+            return (false);
+
+        node = topo_next_nonchild_node(root, node);
+    }
+
+    /* No explicit subgroups is essentially one subgroup. */
+    if (count == 0) {
+        count = 1;
+
+        if (!topo_analyze_table(root, all, level + 1, results))
+            return (false);
+    }
+
+    if (results->entities[level] == -1)
+        results->entities[level] = count;
+    else if (results->entities[level] != count)
+        return (false);
+
+    return (true);
+}
+
 /*
  * Check if the topology is uniform, that is, each package has the same number
  * of cores in it and each core has the same number of threads (logical
- * processors) in it. If so, calculate the number of package, the number of
- * cores per package and the number of logical processors per core.
- * 'all' parameter tells whether to include administratively disabled logical
- * processors into the analysis.
+ * processors) in it.  If so, calculate the number of packages, the number of
+ * groups per package, the number of cachegroups per group, and the number of
+ * logical processors per cachegroup.  'all' parameter tells whether to include
+ * administratively disabled logical processors into the analysis.
  */
 int
 topo_analyze(struct topo_node *topo_root, int all,
-    int *pkg_count, int *cores_per_pkg, int *thrs_per_core)
+    struct topo_analysis *results)
 {
-    struct topo_node *pkg_node;
-    struct topo_node *core_node;
-    struct topo_node *pu_node;
-    int thrs_per_pkg;
-    int cpp_counter;
-    int tpc_counter;
-    int tpp_counter;
-
-    *pkg_count = 0;
-    *cores_per_pkg = -1;
-    *thrs_per_core = -1;
-    thrs_per_pkg = -1;
-
-    pkg_node = topo_root;
-    while (pkg_node != NULL) {
-        if (pkg_node->type != TOPO_TYPE_PKG) {
-            pkg_node = topo_next_node(topo_root, pkg_node);
-            continue;
-        }
-        if (!all && CPU_EMPTY(&pkg_node->cpuset)) {
-            pkg_node = topo_next_nonchild_node(topo_root, pkg_node);
-            continue;
-        }
-
-        (*pkg_count)++;
-
-        cpp_counter = 0;
-        tpp_counter = 0;
-        core_node = pkg_node;
-        while (core_node != NULL) {
-            if (core_node->type == TOPO_TYPE_CORE) {
-                if (!all && CPU_EMPTY(&core_node->cpuset)) {
-                    core_node =
-                        topo_next_nonchild_node(pkg_node,
-                        core_node);
-                    continue;
-                }
-
-                cpp_counter++;
-
-                tpc_counter = 0;
-                pu_node = core_node;
-                while (pu_node != NULL) {
-                    if (pu_node->type == TOPO_TYPE_PU &&
-                        (all || !CPU_EMPTY(&pu_node->cpuset)))
-                        tpc_counter++;
-                    pu_node = topo_next_node(core_node,
-                        pu_node);
-                }
-
-                if (*thrs_per_core == -1)
-                    *thrs_per_core = tpc_counter;
-                else if (*thrs_per_core != tpc_counter)
-                    return (0);
-
-                core_node = topo_next_nonchild_node(pkg_node,
-                    core_node);
-            } else {
-                /* PU node directly under PKG. */
-                if (core_node->type == TOPO_TYPE_PU &&
-                    (all || !CPU_EMPTY(&core_node->cpuset)))
-                    tpp_counter++;
-
-                core_node = topo_next_node(pkg_node,
-                    core_node);
-            }
-        }
-
-        if (*cores_per_pkg == -1)
-            *cores_per_pkg = cpp_counter;
-        else if (*cores_per_pkg != cpp_counter)
-            return (0);
-        if (thrs_per_pkg == -1)
-            thrs_per_pkg = tpp_counter;
-        else if (thrs_per_pkg != tpp_counter)
-            return (0);
-
-        pkg_node = topo_next_nonchild_node(topo_root, pkg_node);
-    }
+    results->entities[TOPO_LEVEL_PKG] = -1;
+    results->entities[TOPO_LEVEL_CORE] = -1;
+    results->entities[TOPO_LEVEL_THREAD] = -1;
+    results->entities[TOPO_LEVEL_GROUP] = -1;
+    results->entities[TOPO_LEVEL_CACHEGROUP] = -1;
 
-    KASSERT(*pkg_count > 0,
-        ("bug in topology or analysis"));
-    if (*cores_per_pkg == 0) {
-        KASSERT(*thrs_per_core == -1 && thrs_per_pkg > 0,
-            ("bug in topology or analysis"));
-        *thrs_per_core = thrs_per_pkg;
-    }
+    if (!topo_analyze_table(topo_root, all, TOPO_LEVEL_PKG, results))
+        return (0);
+
+    KASSERT(results->entities[TOPO_LEVEL_PKG] > 0,
+        ("bug in topology or analysis"));
 
     return (1);
 }
 
 #endif /* SMP */
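
For readers who want to poke at the traversal logic without building a
kernel, here is a minimal userspace sketch of the same table-driven idea.
The node layout, levels, and tree below are invented for illustration and
much simpler than the kernel's topo_next_node() subtree walk, but they
exercise the two behaviors the commit message describes: one loop per
call-stack level, and a missing layer inferred as a single unit.

#include <stdbool.h>
#include <stdio.h>

enum level { PKG, CORE, THREAD, LEVEL_COUNT };

struct node {
    enum level type;
    int nchildren;
    struct node *children[8];
};

/*
 * One loop per recursion level: count this level's units under 'root',
 * recurse into each, then record or compare the count for uniformity.
 */
static bool
analyze(const struct node *root, enum level lvl, int entities[LEVEL_COUNT])
{
    int i, count;

    if (lvl >= LEVEL_COUNT)
        return (true);

    count = 0;
    for (i = 0; i < root->nchildren; i++) {
        const struct node *n = root->children[i];

        if (n->type != lvl)
            continue;
        count++;
        if (!analyze(n, lvl + 1, entities))
            return (false);
    }

    /* No explicit subgroups is essentially one subgroup. */
    if (count == 0) {
        count = 1;
        if (!analyze(root, lvl + 1, entities))
            return (false);
    }

    if (entities[lvl] == -1)
        entities[lvl] = count;      /* first time this level is seen */
    else if (entities[lvl] != count)
        return (false);             /* non-uniform topology */
    return (true);
}

int
main(void)
{
    /* One package, two cores, two threads per core. */
    struct node t0 = { .type = THREAD }, t1 = { .type = THREAD };
    struct node t2 = { .type = THREAD }, t3 = { .type = THREAD };
    struct node c0 = { .type = CORE, .nchildren = 2,
        .children = { &t0, &t1 } };
    struct node c1 = { .type = CORE, .nchildren = 2,
        .children = { &t2, &t3 } };
    struct node pkg = { .type = PKG, .nchildren = 2,
        .children = { &c0, &c1 } };
    struct node top = { .nchildren = 1, .children = { &pkg } };
    int entities[LEVEL_COUNT] = { -1, -1, -1 };

    if (analyze(&top, PKG, entities))
        printf("%d package(s) x %d core(s) x %d thread(s)\n",
            entities[PKG], entities[CORE], entities[THREAD]);
    else
        printf("Non-uniform topology\n");
    return (0);
}

Hanging the threads of one package directly off it (no CORE nodes) makes
that package's core count the inferred one; if another package still has
two explicit cores, the counts disagree and analyze() takes the
"non-uniform topology" return (false) path, just as the kernel routine does.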

--- a/sys/sys/smp.h
+++ b/sys/sys/smp.h

@@ -120,8 +120,25 @@ struct topo_node * topo_next_node(struct topo_node *top,
 struct topo_node * topo_next_nonchild_node(struct topo_node *top,
     struct topo_node *node);
 void topo_set_pu_id(struct topo_node *node, cpuid_t id);
-int topo_analyze(struct topo_node *topo_root, int all, int *pkg_count,
-    int *cores_per_pkg, int *thrs_per_core);
+
+enum topo_level {
+    TOPO_LEVEL_PKG = 0,
+    /*
+     * Some systems have useful sub-package core organizations.  On these,
+     * a package has one or more subgroups.  Each subgroup contains one or
+     * more cache groups (cores that share a last level cache).
+     */
+    TOPO_LEVEL_GROUP,
+    TOPO_LEVEL_CACHEGROUP,
+    TOPO_LEVEL_CORE,
+    TOPO_LEVEL_THREAD,
+    TOPO_LEVEL_COUNT    /* Must be last */
+};
+
+struct topo_analysis {
+    int entities[TOPO_LEVEL_COUNT];
+};
+
+int topo_analyze(struct topo_node *topo_root, int all,
+    struct topo_analysis *results);
 
 #define TOPO_FOREACH(i, root)   \
     for (i = root; i != NULL; i = topo_next_node(root, i))
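
One convenient property of the new result layout: every count is
"per parent unit", so when topo_analyze() succeeds, the product of the five
entities[] values is the total number of logical CPUs included in the
analysis (missing layers contribute an inferred factor of one). A
hypothetical consumer, not part of this commit, could exploit that:

/*
 * Hypothetical helper, not in this commit: in a uniform topology, the
 * per-level counts multiply out to the total number of PUs analyzed.
 */
static int
topo_total_threads(const struct topo_analysis *a)
{
    int i, total;

    total = 1;
    for (i = 0; i < TOPO_LEVEL_COUNT; i++)
        total *= a->entities[i];
    return (total);
}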

--- a/sys/x86/x86/mp_x86.c
+++ b/sys/x86/x86/mp_x86.c

@@ -662,18 +662,23 @@ cpu_mp_announce(void)
 {
     struct topo_node *node;
     const char *hyperthread;
-    int pkg_count;
-    int cores_per_pkg;
-    int thrs_per_core;
+    struct topo_analysis topology;
 
     printf("FreeBSD/SMP: ");
-    if (topo_analyze(&topo_root, 1, &pkg_count,
-        &cores_per_pkg, &thrs_per_core)) {
-        printf("%d package(s)", pkg_count);
-        if (cores_per_pkg > 0)
-            printf(" x %d core(s)", cores_per_pkg);
-        if (thrs_per_core > 1)
-            printf(" x %d hardware threads", thrs_per_core);
+    if (topo_analyze(&topo_root, 1, &topology)) {
+        printf("%d package(s)", topology.entities[TOPO_LEVEL_PKG]);
+        if (topology.entities[TOPO_LEVEL_GROUP] > 1)
+            printf(" x %d groups",
+                topology.entities[TOPO_LEVEL_GROUP]);
+        if (topology.entities[TOPO_LEVEL_CACHEGROUP] > 1)
+            printf(" x %d cache groups",
+                topology.entities[TOPO_LEVEL_CACHEGROUP]);
+        if (topology.entities[TOPO_LEVEL_CORE] > 0)
+            printf(" x %d core(s)",
+                topology.entities[TOPO_LEVEL_CORE]);
+        if (topology.entities[TOPO_LEVEL_THREAD] > 1)
+            printf(" x %d hardware threads",
+                topology.entities[TOPO_LEVEL_THREAD]);
     } else {
         printf("Non-uniform topology");
     }
@@ -681,13 +686,21 @@ cpu_mp_announce(void)
     if (disabled_cpus) {
         printf("FreeBSD/SMP Online: ");
 
-        if (topo_analyze(&topo_root, 0, &pkg_count,
-            &cores_per_pkg, &thrs_per_core)) {
-            printf("%d package(s)", pkg_count);
-            if (cores_per_pkg > 0)
-                printf(" x %d core(s)", cores_per_pkg);
-            if (thrs_per_core > 1)
-                printf(" x %d hardware threads", thrs_per_core);
+        if (topo_analyze(&topo_root, 0, &topology)) {
+            printf("%d package(s)",
+                topology.entities[TOPO_LEVEL_PKG]);
+            if (topology.entities[TOPO_LEVEL_GROUP] > 1)
+                printf(" x %d groups",
+                    topology.entities[TOPO_LEVEL_GROUP]);
+            if (topology.entities[TOPO_LEVEL_CACHEGROUP] > 1)
+                printf(" x %d cache groups",
+                    topology.entities[TOPO_LEVEL_CACHEGROUP]);
+            if (topology.entities[TOPO_LEVEL_CORE] > 0)
+                printf(" x %d core(s)",
+                    topology.entities[TOPO_LEVEL_CORE]);
+            if (topology.entities[TOPO_LEVEL_THREAD] > 1)
+                printf(" x %d hardware threads",
+                    topology.entities[TOPO_LEVEL_THREAD]);
         } else {
             printf("Non-uniform topology");
         }