perf map_groups: Introduce for_each_entry() and for_each_entry_safe() iterators

To reduce boilerplate, provide a more compact form for iterating over the
maps in a 'struct map_groups'.
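
An illustrative sketch of the shape of the new iterator (count_maps() is a
made-up helper, not part of this change; the open-coded form it replaces is
shown in the comment):

static unsigned int count_maps(struct map_groups *mg)
{
	struct map *map;
	unsigned int n = 0;

	/* Was: for (map = map_groups__first(mg); map; map = map_groups__next(map)) */
	map_groups__for_each_entry(mg, map)
		n++;

	return n;
}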

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lkml.kernel.org/n/tip-gc3go6fmdn30twusg91t2q56@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
commit 50481461cf
parent 8efc4f0568
Arnaldo Carvalho de Melo, 2019-10-28 11:55:28 -03:00
4 changed files with 14 additions and 32 deletions

tools/perf/tests/map_groups.c

@@ -18,17 +18,16 @@ static int check_maps(struct map_def *merged, unsigned int size, struct map_grou
 	struct map *map;
 	unsigned int i = 0;
 
-	map = map_groups__first(mg);
-	while (map) {
+	map_groups__for_each_entry(mg, map) {
+		if (i > 0)
+			TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
+
 		TEST_ASSERT_VAL("wrong map start", map->start == merged[i].start);
 		TEST_ASSERT_VAL("wrong map end", map->end == merged[i].end);
 		TEST_ASSERT_VAL("wrong map name", !strcmp(map->dso->name, merged[i].name));
 		TEST_ASSERT_VAL("wrong map refcnt", refcount_read(&map->refcnt) == 2);
 
 		i++;
-		map = map_groups__next(map);
-
-		TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
 	}
 
 	return TEST_OK;

tools/perf/util/map_groups.h

@@ -89,12 +89,11 @@ static inline struct map *map_groups__find(struct map_groups *mg, u64 addr)
 	return maps__find(&mg->maps, addr);
 }
 
-struct map *map_groups__first(struct map_groups *mg);
+#define map_groups__for_each_entry(mg, map) \
+	for (map = maps__first(&mg->maps); map; map = map__next(map))
 
-static inline struct map *map_groups__next(struct map *map)
-{
-	return map__next(map);
-}
+#define map_groups__for_each_entry_safe(mg, map, next) \
+	for (map = maps__first(&mg->maps), next = map__next(map); map; map = next, next = map__next(map))
 
 struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp);
 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp);
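
A hedged usage sketch (not in this patch; drop_all_maps() is invented for
illustration): the _safe form reads the next map before the loop body runs,
so the current map can be removed from the group while iterating, which is
how dso__load_kcore() uses it below.

static void drop_all_maps(struct map_groups *mg)
{
	struct map *map, *next;

	/* "next" is fetched up front, so removing "map" here is safe. */
	map_groups__for_each_entry_safe(mg, map, next)
		map_groups__remove(mg, map);
}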

tools/perf/util/symbol.c

@@ -1049,11 +1049,6 @@ int compare_proc_modules(const char *from, const char *to)
 	return ret;
 }
 
-struct map *map_groups__first(struct map_groups *mg)
-{
-	return maps__first(&mg->maps);
-}
-
 static int do_validate_kcore_modules(const char *filename,
 				  struct map_groups *kmaps)
 {
@@ -1065,13 +1060,10 @@ static int do_validate_kcore_modules(const char *filename,
 	if (err)
 		return err;
 
-	old_map = map_groups__first(kmaps);
-	while (old_map) {
-		struct map *next = map_groups__next(old_map);
+	map_groups__for_each_entry(kmaps, old_map) {
 		struct module_info *mi;
 
 		if (!__map__is_kmodule(old_map)) {
-			old_map = next;
 			continue;
 		}
 
@@ -1081,8 +1073,6 @@ static int do_validate_kcore_modules(const char *filename,
 			err = -EINVAL;
 			goto out;
 		}
-
-		old_map = next;
 	}
 out:
 	delete_modules(&modules);
@@ -1185,9 +1175,7 @@ int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
 	struct map *old_map;
 	LIST_HEAD(merged);
 
-	for (old_map = map_groups__first(kmaps); old_map;
-	     old_map = map_groups__next(old_map)) {
+	map_groups__for_each_entry(kmaps, old_map) {
 		/* no overload with this one */
 		if (new_map->end < old_map->start ||
 		    new_map->start >= old_map->end)
@@ -1260,7 +1248,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 {
 	struct map_groups *kmaps = map__kmaps(map);
 	struct kcore_mapfn_data md;
-	struct map *old_map, *new_map, *replacement_map = NULL;
+	struct map *old_map, *new_map, *replacement_map = NULL, *next;
 	struct machine *machine;
 	bool is_64_bit;
 	int err, fd;
@@ -1307,10 +1295,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 	}
 
 	/* Remove old maps */
-	old_map = map_groups__first(kmaps);
-	while (old_map) {
-		struct map *next = map_groups__next(old_map);
-
+	map_groups__for_each_entry_safe(kmaps, old_map, next) {
 		/*
 		 * We need to preserve eBPF maps even if they are
 		 * covered by kcore, because we need to access
@@ -1318,7 +1303,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 		 */
 		if (old_map != map && !__map__is_bpf_prog(old_map))
 			map_groups__remove(kmaps, old_map);
-		old_map = next;
 	}
 
 	machine->trampolines_mapped = false;

tools/perf/util/machine.c

@@ -142,9 +142,9 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
 					      struct thread *thread)
 {
 	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
-	struct map *map = map_groups__first(thread->mg);
+	struct map *map;
 
-	for (; map ; map = map_groups__next(map)) {
+	map_groups__for_each_entry(thread->mg, map) {
 		struct dso *dso = map->dso;
 		if (!dso || dso->long_name[0] != '/')
 			continue;