Mirror of https://github.com/torvalds/linux, synced 2024-11-05 18:23:50 +00:00
perf maps: Move symbol maps functions to maps.c
Move the find and certain other symbol maps__* functions to maps.c for
better abstraction.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Colin Ian King <colin.i.king@gmail.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: German Gomez <german.gomez@arm.com>
Cc: Guilherme Amadio <amadio@gentoo.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Li Dong <lidong@vivo.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Nick Terrell <terrelln@fb.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Vincent Whitchurch <vincent.whitchurch@axis.com>
Cc: Wenyu Liu <liuwenyu7@huawei.com>
Cc: Yang Jihong <yangjihong1@huawei.com>
Link: https://lore.kernel.org/r/20231127220902.1315692-14-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in: parent 9fa688ea34, commit 0f6ab6a3fb
4 changed files with 250 additions and 249 deletions
tools/perf/util/maps.c

@@ -475,3 +475,241 @@ struct map_rb_node *map_rb_node__next(struct map_rb_node *node)

	return rb_entry(next, struct map_rb_node, rb_node);
}

static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map **)a;
	const struct map *map_b = *(const struct map **)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso_a->short_name, dso_b->short_name);

	if (ret == 0 && map_a != map_b) {
		/*
		 * Ensure distinct but name equal maps have an order in part to
		 * aid reference counting.
		 */
		ret = (int)map__start(map_a) - (int)map__start(map_b);
		if (ret == 0)
			ret = (int)((intptr_t)map_a - (intptr_t)map_b);
	}

	return ret;
}

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso->short_name);
}

void __maps__sort_by_name(struct maps *maps)
{
	qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
}

static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
	struct map_rb_node *rb_node;
	struct map **maps_by_name = realloc(maps__maps_by_name(maps),
					    maps__nr_maps(maps) * sizeof(struct map *));
	int i = 0;

	if (maps_by_name == NULL)
		return -1;

	up_read(maps__lock(maps));
	down_write(maps__lock(maps));

	RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);

	maps__for_each_entry(maps, rb_node)
		maps_by_name[i++] = map__get(rb_node->map);

	__maps__sort_by_name(maps);

	up_write(maps__lock(maps));
	down_read(maps__lock(maps));

	return 0;
}

static struct map *__maps__find_by_name(struct maps *maps, const char *name)
{
	struct map **mapp;

	if (maps__maps_by_name(maps) == NULL &&
	    map__groups__sort_by_name_from_rbtree(maps))
		return NULL;

	mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
		       sizeof(*mapp), map__strcmp_name);
	if (mapp)
		return *mapp;
	return NULL;
}
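
For illustration only, a minimal standalone sketch (not part of this patch) of the sort-once-then-bsearch pattern that __maps__sort_by_name() and __maps__find_by_name() apply to the maps_by_name array. The struct and helper names below are hypothetical, and plain libc stands in for the perf accessors:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for a map whose DSO has a short name. */
struct fake_map {
	const char *short_name;
	unsigned long start;
};

/* qsort comparator over an array of struct fake_map pointers, by name. */
static int fake_map__strcmp(const void *a, const void *b)
{
	const struct fake_map *ma = *(const struct fake_map **)a;
	const struct fake_map *mb = *(const struct fake_map **)b;

	return strcmp(ma->short_name, mb->short_name);
}

/* bsearch comparator: the key is the name string, the element is a pointer. */
static int fake_map__strcmp_name(const void *name, const void *b)
{
	return strcmp(name, (*(const struct fake_map **)b)->short_name);
}

int main(void)
{
	struct fake_map vmlinux = { "vmlinux", 0 }, kvm = { "[kvm]", 0x1000 };
	struct fake_map *by_name[] = { &kvm, &vmlinux };
	const size_t nr = sizeof(by_name) / sizeof(by_name[0]);
	struct fake_map **found;

	/* Sort the pointer array by name once... */
	qsort(by_name, nr, sizeof(struct fake_map *), fake_map__strcmp);

	/* ...then look names up in O(log n), as __maps__find_by_name() does. */
	found = bsearch("vmlinux", by_name, nr, sizeof(*found), fake_map__strcmp_name);
	printf("found: %s\n", found ? (*found)->short_name : "(none)");
	return 0;
}

The real code pays the sort cost lazily, only on the first by-name lookup, which is the job of map__groups__sort_by_name_from_rbtree() above.
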
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map_rb_node *rb_node;
	struct map *map;

	down_read(maps__lock(maps));

	if (RC_CHK_ACCESS(maps)->last_search_by_name) {
		const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);

		if (strcmp(dso->short_name, name) == 0) {
			map = RC_CHK_ACCESS(maps)->last_search_by_name;
			goto out_unlock;
		}
	}
	/*
	 * If we have maps->maps_by_name, then the name isn't in the rbtree,
	 * as maps->maps_by_name mirrors the rbtree when lookups by name are
	 * made.
	 */
	map = __maps__find_by_name(maps, name);
	if (map || maps__maps_by_name(maps) != NULL)
		goto out_unlock;

	/* Fallback to traversing the rbtree... */
	maps__for_each_entry(maps, rb_node) {
		struct dso *dso;

		map = rb_node->map;
		dso = map__dso(map);
		if (strcmp(dso->short_name, name) == 0) {
			RC_CHK_ACCESS(maps)->last_search_by_name = map;
			goto out_unlock;
		}
	}
	map = NULL;

out_unlock:
	up_read(maps__lock(maps));
	return map;
}

void maps__fixup_end(struct maps *maps)
{
	struct map_rb_node *prev = NULL, *curr;

	down_write(maps__lock(maps));

	maps__for_each_entry(maps, curr) {
		if (prev != NULL && !map__end(prev->map))
			map__set_end(prev->map, map__start(curr->map));

		prev = curr;
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	if (curr && !map__end(curr->map))
		map__set_end(curr->map, ~0ULL);

	up_write(maps__lock(maps));
}
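
As a rough standalone illustration (not part of this patch) of what maps__fixup_end() does, here is the same pass over a plain start-sorted array: any entry whose end is still unknown is closed at the next entry's start, and the last open entry is extended to the highest possible address. The struct and function names are made up for the sketch:

#include <stdio.h>

struct range {
	unsigned long long start;
	unsigned long long end;	/* 0 means "end not known yet" */
};

/* Hypothetical fixup over a start-sorted array, mirroring maps__fixup_end(). */
static void ranges__fixup_end(struct range *r, size_t nr)
{
	for (size_t i = 0; i + 1 < nr; i++) {
		if (!r[i].end)
			r[i].end = r[i + 1].start;
	}
	/* Guess the last range's final address, as the real code does with ~0ULL. */
	if (nr && !r[nr - 1].end)
		r[nr - 1].end = ~0ULL;
}

int main(void)
{
	struct range r[] = { { 0x1000, 0 }, { 0x4000, 0 }, { 0x9000, 0 } };

	ranges__fixup_end(r, 3);
	for (size_t i = 0; i < 3; i++)
		printf("[%#llx, %#llx)\n", r[i].start, r[i].end);
	return 0;
}
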
/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	struct map_rb_node *rb_node;
	LIST_HEAD(merged);
	int err = 0;

	maps__for_each_entry(kmaps, rb_node) {
		struct map *old_map = rb_node->map;

		/* no overload with this one */
		if (map__end(new_map) < map__start(old_map) ||
		    map__start(new_map) >= map__end(old_map))
			continue;

		if (map__start(new_map) < map__start(old_map)) {
			/*
			 * |new......
			 *       |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 * |new......| -> |new..|
				 *       |old....| ->      |old....|
				 */
				map__set_end(new_map, map__start(old_map));
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map_list_node *m = map_list_node__new();

				if (!m) {
					err = -ENOMEM;
					goto out;
				}

				m->map = map__clone(new_map);
				if (!m->map) {
					free(m);
					err = -ENOMEM;
					goto out;
				}

				map__set_end(m->map, map__start(old_map));
				list_add_tail(&m->node, &merged);
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		}
	}

out:
	while (!list_empty(&merged)) {
		struct map_list_node *old_node;

		old_node = list_entry(merged.next, struct map_list_node, node);
		list_del_init(&old_node->node);
		if (!err)
			err = maps__insert(kmaps, old_node->map);
		map__put(old_node->map);
		free(old_node);
	}

	if (new_map) {
		if (!err)
			err = maps__insert(kmaps, new_map);
		map__put(new_map);
	}
	return err;
}
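
The splitting logic above reduces to four interval cases per existing map. As a loose standalone sketch (not this function, and without the cloning, page-offset, or list handling of the real code), here is the same case analysis on plain integer ranges; all names here are hypothetical:

#include <stdio.h>

struct ival { unsigned long long start, end; };

/*
 * Hypothetical helper mirroring the case analysis in maps__merge_in():
 * clip "new" against one overlapping "old" range. Returns 0 if "new"
 * survives (possibly trimmed), emitting a left-hand piece when it is
 * split in two, and -1 when "new" is entirely covered and dropped.
 */
static int ival__clip(struct ival *new, const struct ival *old,
		      struct ival *left_piece, int *have_left)
{
	*have_left = 0;

	/* no overlap with this one */
	if (new->end < old->start || new->start >= old->end)
		return 0;

	if (new->start < old->start) {
		if (new->end < old->end) {
			/* |new......|      -> |new..|
			 *       |old....|  ->       |old....|      */
			new->end = old->start;
		} else {
			/* |new...........| -> |new..|      |new..|
			 *       |old....|  ->       |old....|      */
			*left_piece = (struct ival){ new->start, old->start };
			*have_left = 1;
			new->start = old->end;
		}
	} else {
		if (new->end < old->end) {
			/*      |new..|     -> (dropped)
			 * |old.........|   -> |old.........|       */
			return -1;
		}
		/*      |new......|  ->          |new...|
		 * |old....|         -> |old....|            */
		new->start = old->end;
	}
	return 0;
}

int main(void)
{
	struct ival new = { 0x1000, 0x9000 }, old = { 0x3000, 0x5000 }, left;
	int have_left;

	if (ival__clip(&new, &old, &left, &have_left) == 0) {
		if (have_left)
			printf("left piece: [%#llx, %#llx)\n", left.start, left.end);
		printf("remaining:  [%#llx, %#llx)\n", new.start, new.end);
	}
	return 0;
}
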
tools/perf/util/maps.h

@@ -21,6 +21,16 @@ struct map_rb_node {
	struct map *map;
};

struct map_list_node {
	struct list_head node;
	struct map *map;
};

static inline struct map_list_node *map_list_node__new(void)
{
	return malloc(sizeof(struct map_list_node));
}

struct map_rb_node *maps__first(struct maps *maps);
struct map_rb_node *map_rb_node__next(struct map_rb_node *node);
struct map_rb_node *maps__find_node(struct maps *maps, struct map *map);

@@ -133,4 +143,6 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map);

void __maps__sort_by_name(struct maps *maps);

void maps__fixup_end(struct maps *maps);

#endif // __PERF_MAPS_H
tools/perf/util/symbol.c

@@ -48,11 +48,6 @@ static bool symbol__is_idle(const char *name);
int vmlinux_path__nr_entries;
char **vmlinux_path;

struct map_list_node {
	struct list_head node;
	struct map *map;
};

struct symbol_conf symbol_conf = {
	.nanosecs = false,
	.use_modules = true,

@@ -90,11 +85,6 @@ static enum dso_binary_type binary_type_symtab[] = {

#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)

static struct map_list_node *map_list_node__new(void)
{
	return malloc(sizeof(struct map_list_node));
}

static bool symbol_type__filter(char symbol_type)
{
	symbol_type = toupper(symbol_type);

@@ -270,29 +260,6 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
		curr->end = roundup(curr->start, 4096) + 4096;
}

void maps__fixup_end(struct maps *maps)
{
	struct map_rb_node *prev = NULL, *curr;

	down_write(maps__lock(maps));

	maps__for_each_entry(maps, curr) {
		if (prev != NULL && !map__end(prev->map))
			map__set_end(prev->map, map__start(curr->map));

		prev = curr;
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	if (curr && !map__end(curr->map))
		map__set_end(curr->map, ~0ULL);

	up_write(maps__lock(maps));
}

struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;

@@ -1270,103 +1237,6 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
	return 0;
}

/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	struct map_rb_node *rb_node;
	LIST_HEAD(merged);
	int err = 0;

	maps__for_each_entry(kmaps, rb_node) {
		struct map *old_map = rb_node->map;

		/* no overload with this one */
		if (map__end(new_map) < map__start(old_map) ||
		    map__start(new_map) >= map__end(old_map))
			continue;

		if (map__start(new_map) < map__start(old_map)) {
			/*
			 * |new......
			 *       |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 * |new......| -> |new..|
				 *       |old....| ->      |old....|
				 */
				map__set_end(new_map, map__start(old_map));
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map_list_node *m = map_list_node__new();

				if (!m) {
					err = -ENOMEM;
					goto out;
				}

				m->map = map__clone(new_map);
				if (!m->map) {
					free(m);
					err = -ENOMEM;
					goto out;
				}

				map__set_end(m->map, map__start(old_map));
				list_add_tail(&m->node, &merged);
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		} else {
			/*
			 *      |new......
			 * |old....
			 */
			if (map__end(new_map) < map__end(old_map)) {
				/*
				 *      |new..|   -> x
				 * |old.........| -> |old.........|
				 */
				map__put(new_map);
				new_map = NULL;
				break;
			} else {
				/*
				 *      |new......| ->         |new...|
				 * |old....|        -> |old....|
				 */
				map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
				map__set_start(new_map, map__end(old_map));
			}
		}
	}

out:
	while (!list_empty(&merged)) {
		struct map_list_node *old_node;

		old_node = list_entry(merged.next, struct map_list_node, node);
		list_del_init(&old_node->node);
		if (!err)
			err = maps__insert(kmaps, old_node->map);
		map__put(old_node->map);
		free(old_node);
	}

	if (new_map) {
		if (!err)
			err = maps__insert(kmaps, new_map);
		map__put(new_map);
	}
	return err;
}

static int dso__load_kcore(struct dso *dso, struct map *map,
			   const char *kallsyms_filename)
{

@@ -2065,124 +1935,6 @@ int dso__load(struct dso *dso, struct map *map)
	return ret;
}

static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map **)a;
	const struct map *map_b = *(const struct map **)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso_a->short_name, dso_b->short_name);

	if (ret == 0 && map_a != map_b) {
		/*
		 * Ensure distinct but name equal maps have an order in part to
		 * aid reference counting.
		 */
		ret = (int)map__start(map_a) - (int)map__start(map_b);
		if (ret == 0)
			ret = (int)((intptr_t)map_a - (intptr_t)map_b);
	}

	return ret;
}

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso->short_name);
}

void __maps__sort_by_name(struct maps *maps)
{
	qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
}

static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
{
	struct map_rb_node *rb_node;
	struct map **maps_by_name = realloc(maps__maps_by_name(maps),
					    maps__nr_maps(maps) * sizeof(struct map *));
	int i = 0;

	if (maps_by_name == NULL)
		return -1;

	up_read(maps__lock(maps));
	down_write(maps__lock(maps));

	RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);

	maps__for_each_entry(maps, rb_node)
		maps_by_name[i++] = map__get(rb_node->map);

	__maps__sort_by_name(maps);

	up_write(maps__lock(maps));
	down_read(maps__lock(maps));

	return 0;
}

static struct map *__maps__find_by_name(struct maps *maps, const char *name)
{
	struct map **mapp;

	if (maps__maps_by_name(maps) == NULL &&
	    map__groups__sort_by_name_from_rbtree(maps))
		return NULL;

	mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
		       sizeof(*mapp), map__strcmp_name);
	if (mapp)
		return *mapp;
	return NULL;
}

struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map_rb_node *rb_node;
	struct map *map;

	down_read(maps__lock(maps));

	if (RC_CHK_ACCESS(maps)->last_search_by_name) {
		const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);

		if (strcmp(dso->short_name, name) == 0) {
			map = RC_CHK_ACCESS(maps)->last_search_by_name;
			goto out_unlock;
		}
	}
	/*
	 * If we have maps->maps_by_name, then the name isn't in the rbtree,
	 * as maps->maps_by_name mirrors the rbtree when lookups by name are
	 * made.
	 */
	map = __maps__find_by_name(maps, name);
	if (map || maps__maps_by_name(maps) != NULL)
		goto out_unlock;

	/* Fallback to traversing the rbtree... */
	maps__for_each_entry(maps, rb_node) {
		struct dso *dso;

		map = rb_node->map;
		dso = map__dso(map);
		if (strcmp(dso->short_name, name) == 0) {
			RC_CHK_ACCESS(maps)->last_search_by_name = map;
			goto out_unlock;
		}
	}
	map = NULL;

out_unlock:
	up_read(maps__lock(maps));
	return map;
}

int dso__load_vmlinux(struct dso *dso, struct map *map,
		      const char *vmlinux, bool vmlinux_allocated)
{
tools/perf/util/symbol.h

@@ -189,7 +189,6 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym,
void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym);
void symbols__fixup_duplicate(struct rb_root_cached *symbols);
void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms);
void maps__fixup_end(struct maps *maps);

typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,